Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] kernel/trace/ftrace.c
index f0ff241..aac7847 100644
@@ -19,7 +19,6 @@
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/suspend.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
@@ -40,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 
+#include "ftrace_internal.h"
 #include "trace_output.h"
 #include "trace_stat.h"
 
 #define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
+enum {
+       FTRACE_MODIFY_ENABLE_FL         = (1 << 0),
+       FTRACE_MODIFY_MAY_SLEEP_FL      = (1 << 1),
+};
+
+struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
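
The new "ftrace_internal.h" include added above, together with the symbols this patch makes non-static (ftrace_list_end here, plus ftrace_lock, ftrace_ops_list, global_ops, __register_ftrace_function(), __unregister_ftrace_function(), ftrace_ops_test(), ftrace_startup() and ftrace_shutdown() in later hunks), suggests the function-graph code split out of this file now reaches them through that header. A sketch of the declarations it would presumably carry, inferred only from the signatures visible in this diff:

	/* kernel/trace/ftrace_internal.h -- assumed contents, for illustration */
	extern struct ftrace_ops ftrace_list_end;
	extern struct mutex ftrace_lock;
	extern struct ftrace_ops __rcu *ftrace_ops_list;
	extern struct ftrace_ops global_ops;

	int __register_ftrace_function(struct ftrace_ops *ops);
	int __unregister_ftrace_function(struct ftrace_ops *ops);
	int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
	int ftrace_startup(struct ftrace_ops *ops, int command);
	int ftrace_shutdown(struct ftrace_ops *ops, int command);
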
@@ -112,11 +117,11 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops);
  */
 static int ftrace_disabled __read_mostly;
 
-static DEFINE_MUTEX(ftrace_lock);
+DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
+struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static struct ftrace_ops global_ops;
+struct ftrace_ops global_ops;
 
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -127,26 +132,6 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw_notrace() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw_notrace(list);                 \
-       do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)                           \
-       while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
-              unlikely((op) != &ftrace_list_end))
-
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -186,18 +171,6 @@ static void ftrace_sync_ipi(void *data)
        smp_rmb();
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void update_function_graph_func(void);
-
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
-static bool fgraph_graph_time = true;
-
-#else
-static inline void update_function_graph_func(void) { }
-#endif
-
-
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
        /*
@@ -334,7 +307,7 @@ static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
 
-static int __register_ftrace_function(struct ftrace_ops *ops)
+int __register_ftrace_function(struct ftrace_ops *ops)
 {
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;
@@ -375,7 +348,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }
 
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
@@ -815,9 +788,16 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static bool fgraph_graph_time = true;
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+       fgraph_graph_time = enable;
+}
+
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-       int index = current->curr_ret_stack;
+       struct ftrace_ret_stack *ret_stack;
 
        function_profile_call(trace->func, 0, NULL, NULL);
 
@@ -825,14 +805,16 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
        if (!current->ret_stack)
                return 0;
 
-       if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
-               current->ret_stack[index].subtime = 0;
+       ret_stack = ftrace_graph_get_ret_stack(current, 0);
+       if (ret_stack)
+               ret_stack->subtime = 0;
 
        return 1;
 }
 
 static void profile_graph_return(struct ftrace_graph_ret *trace)
 {
+       struct ftrace_ret_stack *ret_stack;
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
@@ -850,16 +832,15 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
        calltime = trace->rettime - trace->calltime;
 
        if (!fgraph_graph_time) {
-               int index;
-
-               index = current->curr_ret_stack;
 
                /* Append this call time to the parent time to subtract */
-               if (index)
-                       current->ret_stack[index - 1].subtime += calltime;
+               ret_stack = ftrace_graph_get_ret_stack(current, 1);
+               if (ret_stack)
+                       ret_stack->subtime += calltime;
 
-               if (current->ret_stack[index].subtime < calltime)
-                       calltime -= current->ret_stack[index].subtime;
+               ret_stack = ftrace_graph_get_ret_stack(current, 0);
+               if (ret_stack && ret_stack->subtime < calltime)
+                       calltime -= ret_stack->subtime;
                else
                        calltime = 0;
        }
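
The two hunks above replace direct indexing of current->ret_stack with the new ftrace_graph_get_ret_stack() accessor: index 0 is the most recent frame, index 1 its caller, and the accessor returns NULL when the requested frame does not exist. A minimal sketch of walking the stack through that accessor (the helper name and the loop are illustrative, not part of this patch; the calltime field is taken from the ret_stack entries used elsewhere in this file):

	/* Hypothetical helper: sum the recorded call times of every frame
	 * currently on this task's return stack, stopping when the
	 * accessor runs out of frames and returns NULL. */
	static unsigned long long total_pending_calltime(void)
	{
		struct ftrace_ret_stack *ret_stack;
		unsigned long long total = 0;
		int i = 0;

		while ((ret_stack = ftrace_graph_get_ret_stack(current, i++)) != NULL)
			total += ret_stack->calltime;

		return total;
	}
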
@@ -874,15 +855,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
        local_irq_restore(flags);
 }
 
+static struct fgraph_ops fprofiler_ops = {
+       .entryfunc = &profile_graph_entry,
+       .retfunc = &profile_graph_return,
+};
+
 static int register_ftrace_profiler(void)
 {
-       return register_ftrace_graph(&profile_graph_return,
-                                    &profile_graph_entry);
+       return register_ftrace_graph(&fprofiler_ops);
 }
 
 static void unregister_ftrace_profiler(void)
 {
-       unregister_ftrace_graph();
+       unregister_ftrace_graph(&fprofiler_ops);
 }
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
@@ -1021,12 +1006,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
-#else
-# define ftrace_graph_active 0
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static struct ftrace_ops *removed_ops;
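
The profiler hunks above also show the new registration interface: register_ftrace_graph() and unregister_ftrace_graph() now take a struct fgraph_ops bundling both callbacks instead of two bare function pointers. A minimal sketch of a caller using that interface, with hypothetical callback and variable names:

	/* Illustrative only -- the names are made up; the API use mirrors
	 * the fprofiler_ops conversion above. */
	static int my_graph_entry(struct ftrace_graph_ent *trace)
	{
		/* Returning 0 skips tracing this function's return. */
		return 1;
	}

	static void my_graph_return(struct ftrace_graph_ret *trace)
	{
		/* trace->rettime - trace->calltime is the duration. */
	}

	static struct fgraph_ops my_gops = {
		.entryfunc	= my_graph_entry,
		.retfunc	= my_graph_return,
	};

	/* register_ftrace_graph(&my_gops) starts tracing;
	 * unregister_ftrace_graph(&my_gops) stops it. */
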
@@ -1067,7 +1046,7 @@ static const struct ftrace_hash empty_hash = {
 };
 #define EMPTY_HASH     ((struct ftrace_hash *)&empty_hash)
 
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
@@ -1503,7 +1482,7 @@ static bool hash_contains_ip(unsigned long ip,
  * This needs to be called with preemption disabled as
  * the hashes are freed with call_rcu().
  */
-static int
+int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
        struct ftrace_ops_hash hash;
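
As the comment above notes, ftrace_ops_test() must be called with preemption disabled because the hashes it reads are freed with call_rcu(). A sketch of the expected calling pattern now that the function is exposed outside this file (the wrapper is hypothetical; the point is only the preempt_disable_notrace()/preempt_enable_notrace() pairing around the test):

	/* Hypothetical caller: invoke ops->func only for filtered ips,
	 * holding off preemption across the hash test and the call. */
	static void call_if_filtered(struct ftrace_ops *ops, unsigned long ip,
				     unsigned long parent_ip, struct pt_regs *regs)
	{
		preempt_disable_notrace();
		if (ftrace_ops_test(ops, ip, regs))
			ops->func(ip, parent_ip, ops, regs);
		preempt_enable_notrace();
	}
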
@@ -2415,10 +2394,12 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        return -1; /* unknown ftrace bug */
 }
 
-void __weak ftrace_replace_code(int enable)
+void __weak ftrace_replace_code(int mod_flags)
 {
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
+       int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
+       int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
        int failed;
 
        if (unlikely(ftrace_disabled))
@@ -2435,6 +2416,8 @@ void __weak ftrace_replace_code(int enable)
                        /* Stop processing */
                        return;
                }
+               if (schedulable)
+                       cond_resched();
        } while_for_each_ftrace_rec();
 }
 
@@ -2548,8 +2531,12 @@ int __weak ftrace_arch_code_modify_post_process(void)
 void ftrace_modify_all_code(int command)
 {
        int update = command & FTRACE_UPDATE_TRACE_FUNC;
+       int mod_flags = 0;
        int err = 0;
 
+       if (command & FTRACE_MAY_SLEEP)
+               mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
+
        /*
         * If the ftrace_caller calls a ftrace_ops func directly,
         * we need to make sure that it only traces functions it
@@ -2567,9 +2554,9 @@ void ftrace_modify_all_code(int command)
        }
 
        if (command & FTRACE_UPDATE_CALLS)
-               ftrace_replace_code(1);
+               ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
        else if (command & FTRACE_DISABLE_CALLS)
-               ftrace_replace_code(0);
+               ftrace_replace_code(mod_flags);
 
        if (update && ftrace_trace_function != ftrace_ops_list_func) {
                function_trace_op = set_function_trace_op;
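
As shown above, ftrace_modify_all_code() now translates a FTRACE_MAY_SLEEP command bit into FTRACE_MODIFY_MAY_SLEEP_FL and passes it down, so ftrace_replace_code() may cond_resched() between records, while FTRACE_UPDATE_CALLS maps to FTRACE_MODIFY_ENABLE_FL. A sketch of how a caller would request a schedulable update (illustrative only; this patch does not add such a call site here):

	/* Update every call site and allow rescheduling between records. */
	ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_MAY_SLEEP);
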
@@ -2682,7 +2669,7 @@ static void ftrace_startup_all(int command)
        update_all_ops = false;
 }
 
-static int ftrace_startup(struct ftrace_ops *ops, int command)
+int ftrace_startup(struct ftrace_ops *ops, int command)
 {
        int ret;
 
@@ -2724,7 +2711,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
        return 0;
 }
 
-static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
        int ret;
 
@@ -6178,7 +6165,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
 }
 #else
 
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                  FTRACE_OPS_FL_INITIALIZED |
@@ -6195,31 +6182,10 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
-/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                                  \
-       ({                                                              \
-               int ___ret = __register_ftrace_function(ops);           \
-               if (!___ret)                                            \
-                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
-               ___ret;                                                 \
-       })
-# define ftrace_shutdown(ops, command)                                 \
-       ({                                                              \
-               int ___ret = __unregister_ftrace_function(ops);         \
-               if (!___ret)                                            \
-                       (ops)->flags &= ~FTRACE_OPS_FL_ENABLED;         \
-               ___ret;                                                 \
-       })
 
 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)
 
-static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
-{
-       return 1;
-}
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
 }
@@ -6746,353 +6712,3 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        mutex_unlock(&ftrace_lock);
        return ret;
 }
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static struct ftrace_ops graph_ops = {
-       .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
-                                  FTRACE_OPS_FL_INITIALIZED |
-                                  FTRACE_OPS_FL_PID |
-                                  FTRACE_OPS_FL_STUB,
-#ifdef FTRACE_GRAPH_TRAMP_ADDR
-       .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
-       /* trampoline_size is only needed for dynamically allocated tramps */
-#endif
-       ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
-
-void ftrace_graph_sleep_time_control(bool enable)
-{
-       fgraph_sleep_time = enable;
-}
-
-void ftrace_graph_graph_time_control(bool enable)
-{
-       fgraph_graph_time = enable;
-}
-
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
-       return 0;
-}
-
-/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
-                       (trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
-static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-
-/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
-static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-{
-       int i;
-       int ret = 0;
-       int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
-       struct task_struct *g, *t;
-
-       for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-               ret_stack_list[i] =
-                       kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                     sizeof(struct ftrace_ret_stack),
-                                     GFP_KERNEL);
-               if (!ret_stack_list[i]) {
-                       start = 0;
-                       end = i;
-                       ret = -ENOMEM;
-                       goto free;
-               }
-       }
-
-       read_lock(&tasklist_lock);
-       do_each_thread(g, t) {
-               if (start == end) {
-                       ret = -EAGAIN;
-                       goto unlock;
-               }
-
-               if (t->ret_stack == NULL) {
-                       atomic_set(&t->tracing_graph_pause, 0);
-                       atomic_set(&t->trace_overrun, 0);
-                       t->curr_ret_stack = -1;
-                       t->curr_ret_depth = -1;
-                       /* Make sure the tasks see the -1 first: */
-                       smp_wmb();
-                       t->ret_stack = ret_stack_list[start++];
-               }
-       } while_each_thread(g, t);
-
-unlock:
-       read_unlock(&tasklist_lock);
-free:
-       for (i = start; i < end; i++)
-               kfree(ret_stack_list[i]);
-       return ret;
-}
-
-static void
-ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
-                       struct task_struct *prev, struct task_struct *next)
-{
-       unsigned long long timestamp;
-       int index;
-
-       /*
-        * Does the user want to count the time a function was asleep.
-        * If so, do not update the time stamps.
-        */
-       if (fgraph_sleep_time)
-               return;
-
-       timestamp = trace_clock_local();
-
-       prev->ftrace_timestamp = timestamp;
-
-       /* only process tasks that we timestamped */
-       if (!next->ftrace_timestamp)
-               return;
-
-       /*
-        * Update all the counters in next to make up for the
-        * time next was sleeping.
-        */
-       timestamp -= next->ftrace_timestamp;
-
-       for (index = next->curr_ret_stack; index >= 0; index--)
-               next->ret_stack[index].calltime += timestamp;
-}
-
-/* Allocate a return stack for each task */
-static int start_graph_tracing(void)
-{
-       struct ftrace_ret_stack **ret_stack_list;
-       int ret, cpu;
-
-       ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
-                                      sizeof(struct ftrace_ret_stack *),
-                                      GFP_KERNEL);
-
-       if (!ret_stack_list)
-               return -ENOMEM;
-
-       /* The cpu_boot init_task->ret_stack will never be freed */
-       for_each_online_cpu(cpu) {
-               if (!idle_task(cpu)->ret_stack)
-                       ftrace_graph_init_idle_task(idle_task(cpu), cpu);
-       }
-
-       do {
-               ret = alloc_retstack_tasklist(ret_stack_list);
-       } while (ret == -EAGAIN);
-
-       if (!ret) {
-               ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-               if (ret)
-                       pr_info("ftrace_graph: Couldn't activate tracepoint"
-                               " probe to kernel_sched_switch\n");
-       }
-
-       kfree(ret_stack_list);
-       return ret;
-}
-
-/*
- * Hibernation protection.
- * The state of the current task is too much unstable during
- * suspend/restore to disk. We want to protect against that.
- */
-static int
-ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
-                                                       void *unused)
-{
-       switch (state) {
-       case PM_HIBERNATION_PREPARE:
-               pause_graph_tracing();
-               break;
-
-       case PM_POST_HIBERNATION:
-               unpause_graph_tracing();
-               break;
-       }
-       return NOTIFY_DONE;
-}
-
-static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
-{
-       if (!ftrace_ops_test(&global_ops, trace->func, NULL))
-               return 0;
-       return __ftrace_graph_entry(trace);
-}
-
-/*
- * The function graph tracer should only trace the functions defined
- * by set_ftrace_filter and set_ftrace_notrace. If another function
- * tracer ops is registered, the graph tracer requires testing the
- * function against the global ops, and not just trace any function
- * that any ftrace_ops registered.
- */
-static void update_function_graph_func(void)
-{
-       struct ftrace_ops *op;
-       bool do_test = false;
-
-       /*
-        * The graph and global ops share the same set of functions
-        * to test. If any other ops is on the list, then
-        * the graph tracing needs to test if its the function
-        * it should call.
-        */
-       do_for_each_ftrace_op(op, ftrace_ops_list) {
-               if (op != &global_ops && op != &graph_ops &&
-                   op != &ftrace_list_end) {
-                       do_test = true;
-                       /* in double loop, break out with goto */
-                       goto out;
-               }
-       } while_for_each_ftrace_op(op);
- out:
-       if (do_test)
-               ftrace_graph_entry = ftrace_graph_entry_test;
-       else
-               ftrace_graph_entry = __ftrace_graph_entry;
-}
-
-static struct notifier_block ftrace_suspend_notifier = {
-       .notifier_call = ftrace_suspend_notifier_call,
-};
-
-int register_ftrace_graph(trace_func_graph_ret_t retfunc,
-                       trace_func_graph_ent_t entryfunc)
-{
-       int ret = 0;
-
-       mutex_lock(&ftrace_lock);
-
-       /* we currently allow only one tracer registered at a time */
-       if (ftrace_graph_active) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       register_pm_notifier(&ftrace_suspend_notifier);
-
-       ftrace_graph_active++;
-       ret = start_graph_tracing();
-       if (ret) {
-               ftrace_graph_active--;
-               goto out;
-       }
-
-       ftrace_graph_return = retfunc;
-
-       /*
-        * Update the indirect function to the entryfunc, and the
-        * function that gets called to the entry_test first. Then
-        * call the update fgraph entry function to determine if
-        * the entryfunc should be called directly or not.
-        */
-       __ftrace_graph_entry = entryfunc;
-       ftrace_graph_entry = ftrace_graph_entry_test;
-       update_function_graph_func();
-
-       ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-out:
-       mutex_unlock(&ftrace_lock);
-       return ret;
-}
-
-void unregister_ftrace_graph(void)
-{
-       mutex_lock(&ftrace_lock);
-
-       if (unlikely(!ftrace_graph_active))
-               goto out;
-
-       ftrace_graph_active--;
-       ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
-       ftrace_graph_entry = ftrace_graph_entry_stub;
-       __ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
-       unregister_pm_notifier(&ftrace_suspend_notifier);
-       unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-
- out:
-       mutex_unlock(&ftrace_lock);
-}
-
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
-       atomic_set(&t->tracing_graph_pause, 0);
-       atomic_set(&t->trace_overrun, 0);
-       t->ftrace_timestamp = 0;
-       /* make curr_ret_stack visible before we add the ret_stack */
-       smp_wmb();
-       t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
-       t->curr_ret_stack = -1;
-       t->curr_ret_depth = -1;
-       /*
-        * The idle task has no parent, it either has its own
-        * stack or no stack at all.
-        */
-       if (t->ret_stack)
-               WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
-       if (ftrace_graph_active) {
-               struct ftrace_ret_stack *ret_stack;
-
-               ret_stack = per_cpu(idle_ret_stack, cpu);
-               if (!ret_stack) {
-                       ret_stack =
-                               kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                             sizeof(struct ftrace_ret_stack),
-                                             GFP_KERNEL);
-                       if (!ret_stack)
-                               return;
-                       per_cpu(idle_ret_stack, cpu) = ret_stack;
-               }
-               graph_init_task(t, ret_stack);
-       }
-}
-
-/* Allocate a return stack for newly created task */
-void ftrace_graph_init_task(struct task_struct *t)
-{
-       /* Make sure we do not use the parent ret_stack */
-       t->ret_stack = NULL;
-       t->curr_ret_stack = -1;
-       t->curr_ret_depth = -1;
-
-       if (ftrace_graph_active) {
-               struct ftrace_ret_stack *ret_stack;
-
-               ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
-                                         sizeof(struct ftrace_ret_stack),
-                                         GFP_KERNEL);
-               if (!ret_stack)
-                       return;
-               graph_init_task(t, ret_stack);
-       }
-}
-
-void ftrace_graph_exit_task(struct task_struct *t)
-{
-       struct ftrace_ret_stack *ret_stack = t->ret_stack;
-
-       t->ret_stack = NULL;
-       /* NULL must become visible to IRQs before we free it: */
-       barrier();
-
-       kfree(ret_stack);
-}
-#endif