/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
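/*
 * Typical usage, via tracefs (commonly mounted at /sys/kernel/tracing
 * or /sys/kernel/debug/tracing):
 *
 *	echo 0 > tracing_max_latency	# reset the recorded maximum
 *	echo irqsoff > current_tracer	# or preemptoff / preemptirqsoff
 *	(run the workload)
 *	cat tracing_max_latency		# worst-case latency seen so far
 *	cat trace			# snapshot of that worst-case section
 */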
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
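/*
 * preempt_trace() and irq_trace() tell the tracepoint probes further
 * down whether the event that just fired is one the currently selected
 * tracer variant (recorded in trace_type) is actually measuring.
 */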
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare,
 * and whatever happens together also happens separately, so dropping
 * the disturbed measurement doesn't decrease the validity of the
 * maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
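/*
 * Flipping the display-graph option switches between function-graph
 * and plain function output, which need different callbacks: restart
 * the tracer, clear the per-cpu state and reset the recorded maximum,
 * since latencies measured in the two modes are not comparable.
 */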
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Should this new latency be reported/recorded?
 *
 * If tracing_thresh is set, report every section at least that long;
 * otherwise report only new maxima.
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}
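/*
 * A critical section just ended: compute how long irqs/preemption
 * were off since start_critical_timing(). If it qualifies per
 * report_latency(), record the latency and snapshot the trace under
 * max_trace_lock, re-checking the maximum once the lock is held.
 */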
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
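/*
 * Mark the beginning of a critical section on this CPU: note the
 * timestamp and set the per-cpu tracing_cpu flag, which tells the
 * function tracer callbacks (via func_prolog_dec()) and the matching
 * stop_critical_timing() that a measurement is in progress.
 */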
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}
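/*
 * Mirror of start_critical_timing(): clear the per-cpu flag first,
 * so a stop is always honored even if the tracer has since been
 * disabled, then evaluate the section that just closed.
 */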
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}
/* start and stop critical timings, used to exclude known sections (e.g. idle) from the measurement */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
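/*
 * The idle loop is the main user of the two exports above. A
 * simplified sketch of what callers such as default_idle_call() do:
 *
 *	stop_critical_timings();
 *	arch_cpu_idle();	(may wait with irqs disabled)
 *	start_critical_timings();
 *
 * so that time parked in a low-power state does not show up as a
 * bogus irqs-off latency.
 */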
#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}
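/*
 * The three tracer variants share irqsoff_trace and the tracepoint
 * probes, so only one of them can be active at a time; irqsoff_busy
 * enforces that.
 */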
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non-overwrite mode screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only the top-level instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}
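/*
 * The irq_disable/irq_enable and preempt_disable/preempt_enable
 * tracepoints used below are declared in <trace/events/preemptirq.h>
 * and fired from the centralized preemptirq tracepoint code; the
 * tracers just attach and detach probes in their init/reset callbacks.
 */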
#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        register_trace_irq_disable(tracer_hardirqs_off, NULL);
        register_trace_irq_enable(tracer_hardirqs_on, NULL);
        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
        unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        register_trace_preempt_disable(tracer_preempt_off, NULL);
        register_trace_preempt_enable(tracer_preempt_on, NULL);
        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        unregister_trace_preempt_disable(tracer_preempt_off, NULL);
        unregister_trace_preempt_enable(tracer_preempt_on, NULL);
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        register_trace_irq_disable(tracer_hardirqs_off, NULL);
        register_trace_irq_enable(tracer_hardirqs_on, NULL);
        register_trace_preempt_disable(tracer_preempt_off, NULL);
        register_trace_preempt_enable(tracer_preempt_on, NULL);

        return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
        unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
        unregister_trace_preempt_disable(tracer_preempt_off, NULL);
        unregister_trace_preempt_enable(tracer_preempt_on, NULL);

        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */