/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array	*irqsoff_trace __read_mostly;
static int			tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

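/*
 * Which "off" events a given tracer instance cares about. The init
 * function of each tracer below sets trace_type, and preempt_trace()
 * and irq_trace() test these bits to decide whether a disable/enable
 * event should start or stop a critical-section measurement.
 */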
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

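/*
 * A successful func_prolog_dec() must be paired with an atomic_dec()
 * of data->disabled once the event has been recorded. A minimal
 * sketch of the pattern, as used by the callbacks below:
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 *	atomic_dec(&data->disabled);
 */
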
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

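/*
 * The flags above make the function graph output carry the CPU
 * column, the task/PID, absolute timestamps and per-function
 * durations - the columns a latency trace is typically read for.
 */
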
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

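/*
 * In other words: with a threshold configured, every section longer
 * than the threshold is recorded; without one, only a new maximum is.
 * A minimal sketch from the shell (assuming tracefs is mounted at
 * /sys/kernel/tracing; values are in microseconds):
 *
 *	echo 100 > tracing_thresh	# log every section over 100us
 *	echo 0   > tracing_thresh	# back to max-latency mode
 *	cat tracing_max_latency		# largest latency seen so far
 */
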
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

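/*
 * Note the per-CPU handshake above: start_critical_timing() sets
 * tracing_cpu only after stamping critical_start, and
 * stop_critical_timing() returns early unless this CPU actually owns
 * an open measurement, so nested disable events and unpaired enable
 * events on a CPU are ignored.
 */
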
/* start and stop critical timings, used to suspend tracing (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

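/*
 * The idle path is the main user of this pair: it calls
 * stop_critical_timings() before entering idle with interrupts
 * disabled and start_critical_timings() on the way out, so an
 * arbitrarily long idle period is not reported as an irqs-off
 * latency.
 */
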
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

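/*
 * flag_changed is invoked when a trace option is toggled while this
 * tracer is active; a non-zero return rejects the change. Here,
 * trace_keep_overwrite() keeps the overwrite flag pinned while a
 * latency tracer owns the buffer.
 */
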
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	register_trace_irq_disable(tracer_hardirqs_off, NULL);
	register_trace_irq_enable(tracer_hardirqs_on, NULL);
	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
	unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

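/*
 * Typical use from the shell, as a sketch (assumes tracefs is mounted
 * at /sys/kernel/tracing):
 *
 *	echo 0 > tracing_max_latency
 *	echo irqsoff > current_tracer
 *	echo 1 > tracing_on
 *	# ... run a workload ...
 *	cat trace
 *
 * The "preemptoff" and "preemptirqsoff" tracers below are driven the
 * same way.
 */
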
#ifdef CONFIG_PREEMPT_TRACER
static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	register_trace_preempt_disable(tracer_preempt_off, NULL);
	register_trace_preempt_enable(tracer_preempt_on, NULL);
	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	unregister_trace_preempt_disable(tracer_preempt_off, NULL);
	unregister_trace_preempt_enable(tracer_preempt_on, NULL);
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	register_trace_irq_disable(tracer_hardirqs_off, NULL);
	register_trace_irq_enable(tracer_hardirqs_on, NULL);
	register_trace_preempt_disable(tracer_preempt_off, NULL);
	register_trace_preempt_enable(tracer_preempt_on, NULL);

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
	unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
	unregister_trace_preempt_disable(tracer_preempt_off, NULL);
	unregister_trace_preempt_enable(tracer_preempt_on, NULL);

	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */