Merge tag 'mfd-next-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f93723c..1f0e63f 100644
@@ -27,13 +27,28 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+                              struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+                                    struct ftrace_ops *op,
+                                    struct ftrace_regs *fregs);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-       TRACE_FUNC_OPT_STACK    = 0x1,
+
+       TRACE_FUNC_NO_OPTS              = 0x0, /* No flags set. */
+       TRACE_FUNC_OPT_STACK            = 0x1,
+       TRACE_FUNC_OPT_NO_REPEATS       = 0x2,
+
+       /* Update this to the next highest bit when adding new options. */
+       TRACE_FUNC_OPT_HIGHEST_BIT      = 0x4
 };
 
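+/*
+ * Mask covering every defined option bit; select_trace_function()
+ * dispatches on the full combination of options below this mask.
+ */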
+#define TRACE_FUNC_OPT_MASK    (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
+
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
        struct ftrace_ops *ops;
@@ -86,6 +101,34 @@ void ftrace_destroy_function_files(struct trace_array *tr)
        ftrace_free_ftrace_ops(tr);
 }
 
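+/* Choose the tracing callback matching the given combination of options. */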
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+       switch (flags_val & TRACE_FUNC_OPT_MASK) {
+       case TRACE_FUNC_NO_OPTS:
+               return function_trace_call;
+       case TRACE_FUNC_OPT_STACK:
+               return function_stack_trace_call;
+       case TRACE_FUNC_OPT_NO_REPEATS:
+               return function_no_repeats_trace_call;
+       case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
+               return function_stack_no_repeats_trace_call;
+       default:
+               return NULL;
+       }
+}
+
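+/*
+ * Lazily allocate the per-CPU state used to detect consecutive calls
+ * to the same function. Returns false only on allocation failure.
+ */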
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+       if (!tr->last_func_repeats &&
+           (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+               tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+               if (!tr->last_func_repeats)
+                       return false;
+       }
+
+       return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
        ftrace_func_t func;
@@ -97,12 +140,12 @@ static int function_trace_init(struct trace_array *tr)
        if (!tr->ops)
                return -ENOMEM;
 
-       /* Currently only the global instance can do stack tracing */
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-           func_flags.val & TRACE_FUNC_OPT_STACK)
-               func = function_stack_trace_call;
-       else
-               func = function_trace_call;
+       func = select_trace_function(func_flags.val);
+       if (!func)
+               return -EINVAL;
+
+       if (!handle_func_repeats(tr, func_flags.val))
+               return -ENOMEM;
 
        ftrace_init_array_ops(tr, func);
 
@@ -205,15 +248,137 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
        local_irq_restore(flags);
 }
 
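+/*
+ * If this call repeats the previously traced one, only update the
+ * timestamp and bump the repetition counter (capped at U16_MAX)
+ * instead of writing a new event into the ring buffer.
+ */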
+static inline bool is_repeat_check(struct trace_array *tr,
+                                  struct trace_func_repeats *last_info,
+                                  unsigned long ip, unsigned long parent_ip)
+{
+       if (last_info->ip == ip &&
+           last_info->parent_ip == parent_ip &&
+           last_info->count < U16_MAX) {
+               last_info->ts_last_call =
+                       ring_buffer_time_stamp(tr->array_buffer.buffer);
+               last_info->count++;
+               return true;
+       }
+
+       return false;
+}
+
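+/*
+ * Flush the pending repetition count as a function-repeats event and
+ * make the current call the new reference for repeat detection.
+ */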
+static inline void process_repeats(struct trace_array *tr,
+                                  unsigned long ip, unsigned long parent_ip,
+                                  struct trace_func_repeats *last_info,
+                                  unsigned int trace_ctx)
+{
+       if (last_info->count) {
+               trace_last_func_repeats(tr, last_info, trace_ctx);
+               last_info->count = 0;
+       }
+
+       last_info->ip = ip;
+       last_info->parent_ip = parent_ip;
+}
+
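+/*
+ * Like function_trace_call(), but consecutive calls with the same
+ * ip/parent_ip pair are folded into a single repeats event.
+ */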
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+                              struct ftrace_ops *op,
+                              struct ftrace_regs *fregs)
+{
+       struct trace_func_repeats *last_info;
+       struct trace_array *tr = op->private;
+       struct trace_array_cpu *data;
+       unsigned int trace_ctx;
+       unsigned long flags;
+       int bit;
+       int cpu;
+
+       if (unlikely(!tr->function_enabled))
+               return;
+
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0)
+               return;
+
+       preempt_disable_notrace();
+
+       cpu = smp_processor_id();
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
+       if (atomic_read(&data->disabled))
+               goto out;
+
+       /*
+        * An interrupt may happen at any point in here, but as far as we
+        * can see, the only damage it can cause is an inaccurate
+        * repetition counter; no trace data is lost.
+        * TODO: find a solution that does not rely on being lucky.
+        */
+       last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
+       if (is_repeat_check(tr, last_info, ip, parent_ip))
+               goto out;
+
+       local_save_flags(flags);
+       trace_ctx = tracing_gen_ctx_flags(flags);
+       process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+
+       trace_function(tr, ip, parent_ip, trace_ctx);
+
+out:
+       ftrace_test_recursion_unlock(bit);
+       preempt_enable_notrace();
+}
+
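+/*
+ * Same as above, but a stack trace is recorded as well. Recursion is
+ * prevented by disabling interrupts and checking the per-CPU
+ * 'disabled' counter instead of taking the recursion trylock.
+ */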
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+                                    struct ftrace_ops *op,
+                                    struct ftrace_regs *fregs)
+{
+       struct trace_func_repeats *last_info;
+       struct trace_array *tr = op->private;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       unsigned int trace_ctx;
+
+       if (unlikely(!tr->function_enabled))
+               return;
+
+       /*
+        * Need to use raw_smp_processor_id() here, since this runs
+        * before the recursion protection is in place.
+        */
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = per_cpu_ptr(tr->array_buffer.data, cpu);
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
+               if (is_repeat_check(tr, last_info, ip, parent_ip))
+                       goto out;
+
+               trace_ctx = tracing_gen_ctx_flags(flags);
+               process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
+
+               trace_function(tr, ip, parent_ip, trace_ctx);
+               __trace_stack(tr, trace_ctx, STACK_SKIP);
+       }
+
+ out:
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
+       { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
        { } /* Always set a last empty entry */
 };
 
 static struct tracer_flags func_flags = {
-       .val = 0, /* By default: all flags disabled */
+       .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
        .opts = func_opts
 };
 
@@ -235,30 +400,32 @@ static struct tracer function_trace;
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-       switch (bit) {
-       case TRACE_FUNC_OPT_STACK:
-               /* do nothing if already set */
-               if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-                       break;
-
-               /* We can change this flag when not running. */
-               if (tr->current_trace != &function_trace)
-                       break;
+       ftrace_func_t func;
+       u32 new_flags;
 
-               unregister_ftrace_function(tr->ops);
+       /* Do nothing if already set. */
+       if (!!set == !!(func_flags.val & bit))
+               return 0;
 
-               if (set) {
-                       tr->ops->func = function_stack_trace_call;
-                       register_ftrace_function(tr->ops);
-               } else {
-                       tr->ops->func = function_trace_call;
-                       register_ftrace_function(tr->ops);
-               }
+       /* If the tracer is not running, just accept the new flag value. */
+       if (tr->current_trace != &function_trace)
+               return 0;
 
-               break;
-       default:
+       new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
+       func = select_trace_function(new_flags);
+       if (!func)
                return -EINVAL;
-       }
+
+       /* Check if there's anything to change. */
+       if (tr->ops->func == func)
+               return 0;
+
+       if (!handle_func_repeats(tr, new_flags))
+               return -ENOMEM;
+
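+       /*
+        * The callback cannot be changed while the ops is registered,
+        * so briefly unregister it around the swap.
+        */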
+       unregister_ftrace_function(tr->ops);
+       tr->ops->func = func;
+       register_ftrace_function(tr->ops);
 
        return 0;
 }
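
A usage sketch, not part of the patch itself: once the function tracer
is the current tracer, the new flag appears as a tracer option file
under tracefs (assuming the usual mount point /sys/kernel/tracing):

    echo function > /sys/kernel/tracing/current_tracer
    echo 1 > /sys/kernel/tracing/options/func-no-repeats

With the option set, consecutive calls with the same ip/parent_ip pair
are folded into a single trace entry carrying a repeat count.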