// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/math64.h>

#include <trace/events/irq.h>

#include "internals.h"

DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);

DEFINE_PER_CPU(struct irq_timings, irq_timings);

struct irqt_stat {
	u64	next_evt;	/* next predicted event for this irq */
	u64	last_ts;	/* last interrupt timestamp */
	u64	variance;	/* cumulated (unscaled) online variance */
	u32	avg;		/* online average of the intervals */
	u32	nr_samples;	/* number of samples in the sequence */
	int	anomalies;	/* consecutive out-of-distribution samples */
	int	valid;		/* statistics usable for a prediction */
};

static DEFINE_IDR(irqt_stats);

void irq_timings_enable(void)
{
	static_branch_enable(&irq_timing_enabled);
}

void irq_timings_disable(void)
{
	static_branch_disable(&irq_timing_enabled);
}
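
/*
 * Note: the recording side is expected to check this static key in the
 * interrupt hot path (see record_irq_time() in internals.h), so when the
 * key is disabled the timestamp capture reduces to a patched-out branch
 * and costs nothing.
 */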

/**
 * irqs_update - update the irq timing statistics with a new timestamp
 *
 * @irqs: an irqt_stat struct pointer
 * @ts: the new timestamp
 *
 * The statistics are computed online, in other words, the code is
 * designed to compute the statistics on a stream of values rather
 * than doing multiple passes on the values to compute the average,
 * then the variance. The integer division introduces a loss of
 * precision but with an acceptable error margin compared to the
 * results we would get with double precision floating point: we are
 * dealing with nanoseconds, so big numbers, consequently the lost
 * fractional part is negligible, especially when converting the time
 * to microseconds afterwards.
 *
 * The computation happens at idle time. When the CPU is not idle, the
 * interrupts' timestamps are stored in the circular buffer; when the
 * CPU goes idle and this routine is called, all the buffer's values
 * are injected into the statistical model, continuing to extend the
 * statistics from the previous busy-idle cycle.
 *
 * Observation showed a device will trigger a burst of periodic
 * interrupts followed by one or two peaks of longer time, for
 * instance when an SD card device flushes its cache, then the
 * periodic intervals occur again. A one second inactivity period
 * resets the stats, which guarantees the statistical values won't
 * exceed 1x10^9, thus the computation won't overflow.
 *
 * Basically, the purpose of the algorithm is to watch the periodic
 * interrupts and eliminate the peaks.
 *
 * An interrupt is considered periodically stable if the intervals of
 * its occurrences follow the normal distribution, thus the values
 * comply with:
 *
 *      avg - 3 x stddev < value < avg + 3 x stddev
 *
 * Which can be simplified to:
 *
 *      -3 x stddev < value - avg < 3 x stddev
 *
 *      abs(value - avg) < 3 x stddev
 *
 * In order to save a costly square root computation, we use the
 * variance. For the record, stddev = sqrt(variance). The equation
 * above becomes:
 *
 *      abs(value - avg) < 3 x sqrt(variance)
 *
 * And finally we square it:
 *
 *      (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2
 *
 *      (value - avg) x (value - avg) < 9 x variance
 *
 * Statistically speaking, any value out of this interval is
 * considered an anomaly and is discarded. However, a normal
 * distribution only appears once there are about 30 samples (the
 * usual rule of thumb in statistics). When there are three
 * consecutive anomalies, the statistics are reset.
 */
static void irqs_update(struct irqt_stat *irqs, u64 ts)
{
	u64 old_ts = irqs->last_ts;
	u64 variance = 0;
	u64 interval;
	s64 diff;

	/*
	 * The timestamps are absolute time values, we need to compute
	 * the timing interval between two interrupts.
	 */
	irqs->last_ts = ts;

	/*
	 * The interval type is u64 in order to deal with the same
	 * type in our computation, which avoids tricky issues with
	 * overflow, sign and division.
	 */
	interval = ts - old_ts;

	/*
	 * The interrupt triggered more than one second apart, that
	 * ends the sequence as predictable for our purpose. In this
	 * case, assume we have the beginning of a sequence and the
	 * timestamp is the first value. As it is impossible to
	 * predict anything at this point, return.
	 *
	 * Note the first timestamp of the sequence will always fall
	 * in this test because the old_ts is zero. That is what we
	 * want as we need another timestamp to compute an interval.
	 */
	if (interval >= NSEC_PER_SEC) {
		memset(irqs, 0, sizeof(*irqs));
		irqs->last_ts = ts;
		return;
	}

	/*
	 * Pre-compute the delta with the average as the result is
	 * used several times in this function.
	 */
	diff = interval - irqs->avg;

	/*
	 * Increment the number of samples.
	 */
	irqs->nr_samples++;

	/*
	 * Online variance divided by the number of elements if there
	 * is more than one sample. Normally the formula is a division
	 * by nr_samples - 1 but we assume the number of elements will
	 * be more than 32, and dividing by 32 instead of 31 is precise
	 * enough.
	 */
	if (likely(irqs->nr_samples > 1))
		variance = irqs->variance >> IRQ_TIMINGS_SHIFT;

	/*
	 * The rule of thumb in statistics for the normal distribution
	 * is having at least 30 samples in order for the model to
	 * apply. Values outside the interval are considered as an
	 * anomaly.
	 */
	if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) {
		/*
		 * After three consecutive anomalies, we reset the
		 * stats as they are no longer stable enough.
		 */
		if (irqs->anomalies++ >= 3) {
			memset(irqs, 0, sizeof(*irqs));
			irqs->last_ts = ts;
			return;
		}
	} else {
		/*
		 * The anomalies must be consecutive, so at this
		 * point, we reset the anomalies counter.
		 */
		irqs->anomalies = 0;
	}

	/*
	 * The interrupt is considered stable enough to try to predict
	 * the next event on it.
	 */
	irqs->valid = 1;

	/*
	 * Online average algorithm:
	 *
	 *  new_average = average + ((value - average) / count)
	 *
	 * The variance computation depends on the new average
	 * being computed here first.
	 */
	irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);

	/*
	 * Online variance algorithm:
	 *
	 *  new_variance = variance + (value - average) x (value - new_average)
	 *
	 * Warning: irqs->avg is updated by the line above, hence
	 * 'interval - irqs->avg' is no longer equal to 'diff'.
	 */
	irqs->variance = irqs->variance + (diff * (interval - irqs->avg));

	/*
	 * Update the next event.
	 */
	irqs->next_evt = ts + irqs->avg;
}
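
/*
 * Worked example of the update above (illustration only, assuming
 * IRQ_TIMINGS_SHIFT == 5, i.e. a divisor of 32): with avg = 1000ns, an
 * accumulated variance of 32000 and at least 30 samples, a new interval
 * of 1100ns gives:
 *
 *	diff = 1100 - 1000 = 100
 *	variance = 32000 >> 5 = 1000
 *	diff * diff = 10000 > 9 * 1000 = 9000	-> counted as an anomaly
 *
 * whereas an interval of 1050ns gives:
 *
 *	diff * diff = 2500 <= 9000		-> accepted
 *	avg += 50 >> 5				-> 1001
 *	variance += 50 * (1050 - 1001)		-> 32000 + 2450
 *	next_evt = ts + 1001
 */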

/**
 * irq_timings_next_event - Return when the next event is supposed to arrive
 *
 * During the last busy cycle, the number of interrupts is incremented
 * and stored in the irq_timings structure. This information is
 * necessary to:
 *
 * - know if the index in the table wrapped up:
 *
 *      If more than the array size interrupts happened during the
 *      last busy/idle cycle, the index wrapped up and we have to
 *      begin with the next element in the array which is the last one
 *      in the sequence, otherwise it is at index 0.
 *
 * - have an indication of the interrupts activity on this CPU
 *   (eg. irq/sec)
 *
 * The values are 'consumed' after being inserted in the statistical
 * model, thus the count is reinitialized.
 *
 * The array of values **must** be browsed in the time direction, the
 * timestamp must increase between an element and the next one.
 *
 * Returns a nanosec time based estimation of the earliest interrupt,
 * U64_MAX otherwise.
 */
u64 irq_timings_next_event(u64 now)
{
	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
	struct irqt_stat *irqs;
	struct irqt_stat __percpu *s;
	u64 ts, next_evt = U64_MAX;
	int i, irq = 0;

	/*
	 * This function must be called with the local irq disabled in
	 * order to prevent the timings circular buffer from being
	 * updated while we are reading it.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * Number of elements in the circular buffer: if it happens it
	 * was flushed before, then the number of elements could be
	 * smaller than IRQ_TIMINGS_SIZE, so the count is used,
	 * otherwise the array size is used as we wrapped. The index
	 * begins from zero when we did not wrap. That could be done
	 * in a nicer way with the proper circular array structure
	 * type but with the cost of extra computation in the
	 * interrupt handler hot path. We choose efficiency.
	 *
	 * Inject the measured irq/timestamp pairs into the statistical
	 * model while decrementing the counter because we consume the
	 * data from our circular buffer.
	 */
	for (i = irqts->count & IRQ_TIMINGS_MASK,
		     irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
	     irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {

		irq = irq_timing_decode(irqts->values[i], &ts);

		s = idr_find(&irqt_stats, irq);
		if (s) {
			irqs = this_cpu_ptr(s);
			irqs_update(irqs, ts);
		}
	}
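
	/*
	 * Example of the wrap handling above (illustration only): with
	 * IRQ_TIMINGS_SIZE == 32, if 40 interrupts were recorded since
	 * the last flush, the oldest surviving sample is at index
	 * 40 & IRQ_TIMINGS_MASK == 8, and the loop consumes the
	 * min(32, 40) == 32 retained samples at indexes 8..31 then
	 * 0..7, i.e. in increasing timestamp order as required.
	 */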

	/*
	 * Look through the interrupts' statistics for the earliest
	 * next event.
	 */
	idr_for_each_entry(&irqt_stats, s, i) {

		irqs = this_cpu_ptr(s);

		if (!irqs->valid)
			continue;

		if (irqs->next_evt <= now) {
			irq = i;
			next_evt = now;

			/*
			 * This interrupt must not be used in the future
			 * until new events occur and update the
			 * statistics.
			 */
			irqs->valid = 0;
			break;
		}

		if (irqs->next_evt < next_evt) {
			irq = i;
			next_evt = irqs->next_evt;
		}
	}

	return next_evt;
}

void irq_timings_free(int irq)
{
	struct irqt_stat __percpu *s;

	s = idr_find(&irqt_stats, irq);
	if (s) {
		free_percpu(s);

		idr_remove(&irqt_stats, irq);
	}
}

int irq_timings_alloc(int irq)
{
	struct irqt_stat __percpu *s;
	int id;

	/*
	 * Some platforms can have the same private interrupt per cpu,
	 * so this function may be called several times with the
	 * same interrupt number. Just bail out in case the per cpu
	 * stat structure is already allocated.
	 */
	s = idr_find(&irqt_stats, irq);
	if (s)
		return 0;

	s = alloc_percpu(*s);
	if (!s)
		return -ENOMEM;

	idr_preload(GFP_KERNEL);
	id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
	idr_preload_end();

	if (id < 0) {
		free_percpu(s);
		return id;
	}

	return 0;
}
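
/*
 * Note on the allocation pattern above: idr_preload(GFP_KERNEL)
 * preallocates IDR memory while sleeping is still allowed, so the
 * following idr_alloc() can safely run with GFP_NOWAIT. The
 * 'irq, irq + 1' range pins the returned id to the irq number itself,
 * which is what makes the direct idr_find(&irqt_stats, irq) lookups
 * in the rest of this file work.
 */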