// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;
/* Our option */
enum {

	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
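
/*
 * The option bits above are combined into func_flags.val and correspond to
 * the tracefs options declared in func_opts[] further down; for each
 * combination, select_trace_function() picks the matching per-function
 * callback.
 */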
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
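
/*
 * Usage sketch (assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # cat /sys/kernel/tracing/trace
 *
 * Once this tracer is selected, the ftrace trampoline invokes one of the
 * callbacks below for every traced function.
 */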
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}
static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
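
/*
 * Usage sketch (assumes the function tracer is current_tracer and tracefs
 * is mounted at /sys/kernel/tracing):
 *
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *   # echo 1 > /sys/kernel/tracing/options/func-no-repeats
 *
 * Writes to these option files end up in func_set_flag() below, which swaps
 * in the matching callback from select_trace_function().
 */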
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;
		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_stack(tr);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
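
/*
 * Usage sketch for the triggers parsed above (assumes tracefs is mounted
 * at /sys/kernel/tracing; the function name is only an example):
 *
 *   # echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'schedule:traceon:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The optional ":5" becomes the probe count handled by
 * update_traceon_count(), limiting how many times the trigger fires.
 * Prefixing the line with '!' removes the probe again.
 */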
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
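
/*
 * Usage sketch (assumes tracefs at /sys/kernel/tracing; the function name
 * is only an example):
 *
 *   # echo 'kfree:stacktrace:3' > /sys/kernel/tracing/set_ftrace_filter
 *
 * records a stack trace the first three times kfree() is hit; without the
 * count the trigger is unlimited.
 */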
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
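
/*
 * Usage sketch (assumes tracefs at /sys/kernel/tracing; the function name
 * is only an example):
 *
 *   # echo 'oops_enter:dump' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'oops_enter:cpudump' > /sys/kernel/tracing/set_ftrace_filter
 *
 * "dump" writes every CPU's ring buffer to the console (DUMP_ALL), while
 * "cpudump" only dumps the buffer of the CPU that hit the probe (DUMP_ORIG).
 */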
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}