kernel/trace/trace_functions.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
        struct ftrace_ops *ops;

        /* The top level array uses the "global_ops" */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        /* Currently only the non-stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_PID;

        tr->ops = ops;
        ops->private = tr;

        return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
        kfree(tr->ops);
        tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent)
{
        /*
         * The top level array uses the "global_ops", and the files are
         * created on boot up.
         */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return 0;

        if (!tr->ops)
                return -EINVAL;

        ftrace_create_filter_files(tr->ops, parent);

        return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
        ftrace_destroy_filter_files(tr->ops);
        ftrace_free_ftrace_ops(tr);
}

static int function_trace_init(struct trace_array *tr)
{
        ftrace_func_t func;
        /*
         * Instance trace_arrays get their ops allocated at
         * instance creation, unless that allocation failed.
         */
        if (!tr->ops)
                return -ENOMEM;

        /* Currently only the global instance can do stack tracing */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
            func_flags.val & TRACE_FUNC_OPT_STACK)
                func = function_stack_trace_call;
        else
                func = function_trace_call;

        ftrace_init_array_ops(tr, func);

        tr->array_buffer.cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace(tr);
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace(tr);
        tracing_stop_cmdline_record();
        ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->array_buffer);
}

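/*
 * Per-event callback for the plain function tracer.
 *
 * ftrace_test_recursion_trylock() returns a negative value when this
 * context is already inside the callback (recursion), in which case the
 * event is dropped; on success the returned bit must be handed back to
 * ftrace_test_recursion_unlock(). Preemption is disabled (notrace)
 * around the per-CPU buffer access so smp_processor_id() stays valid.
 */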
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        cpu = smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        ftrace_test_recursion_unlock(bit);
        preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

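/*
 * Variant of the callback that also records a stack trace. Unlike
 * function_trace_call() above, it disables interrupts and relies on the
 * per-CPU data->disabled counter for protection: only the outermost
 * (non-nested) entry on a CPU, where the counter increments to 1,
 * records the events.
 */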
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!tr->function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                __trace_stack(tr, flags, STACK_SKIP, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
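
/*
 * Illustrative tracefs usage for the option above (a sketch, assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * The second write flips TRACE_FUNC_OPT_STACK via func_set_flag() below,
 * swapping the live callback to function_stack_trace_call().
 */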

static void tracing_start_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        register_ftrace_function(tr->ops);
        tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
        tr->function_enabled = 0;
        unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                /* We can change this flag when not running. */
                if (tr->current_trace != &function_trace)
                        break;

                unregister_ftrace_function(tr->ops);

                if (set)
                        tr->ops->func = function_stack_trace_call;
                else
                        tr->ops->func = function_trace_call;

                register_ftrace_function(tr->ops);

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __tracer_data =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
        .allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};
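
/*
 * Sketch of how this tracer is selected at runtime (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 *
 * Writing "function" invokes function_trace_init(), which registers
 * tr->ops and starts recording every traced function call.
 */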

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
                                 unsigned long ip,
                                 struct trace_array *tr, bool on,
                                 void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;

        /*
         * Tracing gets disabled (or enabled) once per count.
         * This function can be called at the same time on multiple CPUs.
         * It is fine if both disable (or enable) tracing, as disabling
         * (or enabling) the second time doesn't do anything as the
         * state of the tracer is already disabled (or enabled).
         * What needs to be synchronized in this case is that the count
         * only gets decremented once, even if the tracer is disabled
         * (or enabled) twice, as the second one is really a nop.
         *
         * The memory barriers guarantee that we only decrement the
         * counter once. First the count is read to a local variable
         * and a read barrier is used to make sure that it is loaded
         * before checking if the tracer is in the state we want.
         * If the tracer is not in the state we want, then the count
         * is guaranteed to be the old count.
         *
         * Next the tracer is set to the state we want (disabled or enabled)
         * then a write memory barrier is used to make sure that
         * the new state is visible before the counter is updated to
         * one less than its old value. This guarantees that another CPU
         * executing this code will see the new state before seeing
         * the new counter value, and will not do anything if it sees
         * the new counter.
         *
         * Note, there is no synchronization between this and a user
         * setting the tracing_on file. But we currently don't care
         * about that.
         */
        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
        old_count = *count;

        if (old_count <= 0)
                return;

        /* Make sure we see count before checking tracing state */
        smp_rmb();

        if (on == !!tracer_tracing_is_on(tr))
                return;

        if (on)
                tracer_tracing_on(tr);
        else
                tracer_tracing_off(tr);

        /* Make sure tracing state is visible before updating count */
        smp_wmb();

        *count = old_count - 1;
}
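
/*
 * Worked example of the scheme above: count is 1, tracing is on, and two
 * CPUs hit a traceoff probe at the same time. Both read old_count == 1.
 * If CPU B already observes tracing off (CPU A's write, ordered by the
 * smp_wmb()/smp_rmb() pair), it returns before touching the count; if it
 * still sees tracing on, it also turns tracing off (a nop the second
 * time) and stores the same value, old_count - 1 == 0. Either way the
 * count effectively drops only once.
 */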

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
                      struct trace_array *tr, struct ftrace_probe_ops *ops,
                      void *data)
{
        update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
               struct trace_array *tr, struct ftrace_probe_ops *ops,
               void *data)
{
        if (tracer_tracing_is_on(tr))
                return;

        tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
                struct trace_array *tr, struct ftrace_probe_ops *ops,
                void *data)
{
        if (!tracer_tracing_is_on(tr))
                return;

        tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
        unsigned long flags;
        int pc;

        local_save_flags(flags);
        pc = preempt_count();

        __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
                        struct trace_array *tr, struct ftrace_probe_ops *ops,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count;
        long old_count;
        long new_count;

        if (!tracing_is_on())
                return;

        /* unlimited? */
        if (!mapper) {
                trace_stack(tr);
                return;
        }

        count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        /*
         * Stack traces should only execute the number of times the
         * user specified in the counter.
         */
        do {
                old_count = *count;

                if (!old_count)
                        return;

                new_count = old_count - 1;
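                /*
                 * cmpxchg() returns the value *count held before the
                 * swap: if that matches old_count, this CPU's decrement
                 * won the race and it gets to record the stack trace.
                 */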
                new_count = cmpxchg(count, old_count, new_count);
                if (new_count == old_count)
                        trace_stack(tr);

                if (!tracing_is_on())
                        return;

        } while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
                        void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count) {
                if (*count <= 0)
                        return 0;
                (*count)--;
        }

        return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
                  struct trace_array *tr, struct ftrace_probe_ops *ops,
                  void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
                     struct trace_array *tr, struct ftrace_probe_ops *ops,
                     void *data)
{
        if (update_count(ops, ip, data))
                ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
                   unsigned long ip, struct ftrace_probe_ops *ops,
                   void *data)
{
        struct ftrace_func_mapper *mapper = data;
        long *count = NULL;

        seq_printf(m, "%ps:%s", (void *)ip, name);

        if (mapper)
                count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

        if (count)
                seq_printf(m, ":count=%ld\n", *count);
        else
                seq_puts(m, ":unlimited\n");

        return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops,
                     void *data)
{
        return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
                      struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
                        struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
                  struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
                     struct ftrace_probe_ops *ops, void *data)
{
        return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *init_data, void **data)
{
        struct ftrace_func_mapper *mapper = *data;

        if (!mapper) {
                mapper = allocate_ftrace_func_mapper();
                if (!mapper)
                        return -ENOMEM;
                *data = mapper;
        }

        return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
                  unsigned long ip, void *data)
{
        struct ftrace_func_mapper *mapper = data;

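        /* An ip of zero means the entire probe is going away: free all mappings. */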
        if (!ip) {
                free_ftrace_func_mapper(mapper, NULL);
                return;
        }

        ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
        .func                   = ftrace_traceon_count,
        .print                  = ftrace_traceon_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
        .func                   = ftrace_traceoff_count,
        .print                  = ftrace_traceoff_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
        .func                   = ftrace_stacktrace_count,
        .print                  = ftrace_stacktrace_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
        .func                   = ftrace_dump_probe,
        .print                  = ftrace_dump_print,
        .init                   = ftrace_count_init,
        .free                   = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
        .func                   = ftrace_cpudump_probe,
        .print                  = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
        .func                   = ftrace_stacktrace,
        .print                  = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
                            struct ftrace_probe_ops *ops,
                            struct ftrace_hash *hash, char *glob,
                            char *cmd, char *param, int enable)
{
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return unregister_ftrace_function_probe_func(glob+1, tr, ops);

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, tr, ops, count);

        return ret < 0 ? ret : 0;
}
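
/*
 * Illustrative set_ftrace_filter usage for the probe commands handled
 * here (a sketch, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 'schedule:traceoff'  > /sys/kernel/tracing/set_ftrace_filter
 *   echo 'schedule:traceon:5' > /sys/kernel/tracing/set_ftrace_filter
 *   echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The optional ":<count>" after the command is parsed above with
 * strsep()/kstrtoul() and limits how many times the probe fires; a
 * leading '!' unregisters a previously installed probe.
 */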

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
        else
                ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
                           char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                     char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &dump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
                        char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;

        if (!tr)
                return -ENODEV;

        ops = &cpudump_probe_ops;

        /* Only dump once. */
        return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
                                           "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name                   = "traceon",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name                   = "traceoff",
        .func                   = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
        .name                   = "stacktrace",
        .func                   = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
        .name                   = "dump",
        .func                   = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
        .name                   = "cpudump",
        .func                   = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                goto out_free_traceoff;

        ret = register_ftrace_command(&ftrace_stacktrace_cmd);
        if (ret)
                goto out_free_traceon;

        ret = register_ftrace_command(&ftrace_dump_cmd);
        if (ret)
                goto out_free_stacktrace;

        ret = register_ftrace_command(&ftrace_cpudump_cmd);
        if (ret)
                goto out_free_dump;

        return 0;

 out_free_dump:
        unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
        unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
        unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
        unregister_ftrace_command(&ftrace_traceoff_cmd);

        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}