Merge tag 'arc-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 31c90fe..929a733 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -387,6 +387,8 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
        return ret;
 }
 
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
        if (ops->flags & FTRACE_OPS_FL_DELETED)
@@ -416,9 +418,13 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
+               /* The control_ops needs the trampoline update */
+               ops = &control_ops;
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       ftrace_update_trampoline(ops);
+
        if (ftrace_enabled)
                update_ftrace_function();
 
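For context, __register_ftrace_function() above is the internal half of register_ftrace_function(), so after this change every ops registered that way also gets its trampoline refreshed. A minimal sketch of such a caller, assuming hypothetical names my_trace_func/my_ops that are not part of this patch:

	#include <linux/ftrace.h>

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* runs for every function matched by this ops' filter hash */
	}

	static struct ftrace_ops my_ops = {
		.func = my_trace_func,
	};

	static int __init my_init(void)
	{
		/* takes ftrace_lock and ends up in __register_ftrace_function() */
		return register_ftrace_function(&my_ops);
	}
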
@@ -565,13 +571,13 @@ static int function_stat_cmp(void *p1, void *p2)
 static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       seq_printf(m, "  Function                               "
-                  "Hit    Time            Avg             s^2\n"
-                     "  --------                               "
-                  "---    ----            ---             ---\n");
+       seq_puts(m, "  Function                               "
+                "Hit    Time            Avg             s^2\n"
+                   "  --------                               "
+                "---    ----            ---             ---\n");
 #else
-       seq_printf(m, "  Function                               Hit\n"
-                     "  --------                               ---\n");
+       seq_puts(m, "  Function                               Hit\n"
+                   "  --------                               ---\n");
 #endif
        return 0;
 }
@@ -598,7 +604,7 @@ static int function_stat_show(struct seq_file *m, void *v)
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       seq_printf(m, "    ");
+       seq_puts(m, "    ");
        avg = rec->time;
        do_div(avg, rec->counter);
 
@@ -1111,6 +1117,43 @@ static struct ftrace_ops global_ops = {
                                          FTRACE_OPS_FL_INITIALIZED,
 };
 
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline, i.e. one for
+ * which neither core_kernel_text() nor is_module_text_address()
+ * would return true.
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+       struct ftrace_ops *op;
+       bool ret = false;
+
+       /*
+        * Some of the ops may be dynamically allocated;
+        * they are freed after a synchronize_sched().
+        */
+       preempt_disable_notrace();
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /*
+                * This is to check for dynamically allocated trampolines.
+                * Trampolines that are in kernel text will have
+                * core_kernel_text() return true.
+                */
+               if (op->trampoline && op->trampoline_size)
+                       if (addr >= op->trampoline &&
+                           addr < op->trampoline + op->trampoline_size) {
+                               ret = true;
+                               goto out;
+                       }
+       } while_for_each_ftrace_op(op);
+
+ out:
+       preempt_enable_notrace();
+
+       return ret;
+}
+
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
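is_ftrace_trampoline() lets stack-walking code treat addresses inside a dynamically allocated trampoline as kernel text; its real caller, __kernel_text_address(), lives outside this file. A hedged sketch of the kind of check the helper enables (my_text_address is a made-up name, not the actual kernel function):

	#include <linux/kernel.h>	/* core_kernel_text() */
	#include <linux/module.h>	/* is_module_text_address() */
	#include <linux/ftrace.h>	/* is_ftrace_trampoline() */

	static int my_text_address(unsigned long addr)
	{
		if (core_kernel_text(addr))
			return 1;
		if (is_module_text_address(addr))
			return 1;
		/* covers trampolines allocated at runtime for a single ftrace_ops */
		return is_ftrace_trampoline(addr);
	}
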
@@ -1315,6 +1358,9 @@ ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+                                      struct ftrace_hash *new_hash);
+
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
@@ -1325,8 +1371,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
+       int ret;
        int i;
 
+       /* Reject setting notrace hash on IPMODIFY ftrace_ops */
+       if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+               return -EINVAL;
+
        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
@@ -1360,6 +1411,16 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        }
 
 update:
+       /* Make sure this can be applied if it is an IPMODIFY ftrace_ops */
+       if (enable) {
+               /* IPMODIFY should be updated only when the filter_hash is updated */
+               ret = ftrace_hash_ipmodify_update(ops, new_hash);
+               if (ret < 0) {
+                       free_ftrace_hash(new_hash);
+                       return ret;
+               }
+       }
+
        /*
         * Remove the current set, update the hash and add
         * them back.
@@ -1724,6 +1785,114 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
 }
 
+/*
+ * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
+ * or if no update is needed, -EBUSY if it detects a conflict of the flag
+ * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
+ * Note that old_hash and new_hash have the following meanings:
+ *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
+ *  - If the hash is EMPTY_HASH, it hits nothing
+ *  - Anything else hits the recs which match the hash entries.
+ */
+static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+                                        struct ftrace_hash *old_hash,
+                                        struct ftrace_hash *new_hash)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec, *end = NULL;
+       int in_old, in_new;
+
+       /* Only update if the ops has been registered */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return 0;
+
+       if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+               return 0;
+
+       /*
+        * Since IPMODIFY is a very address-sensitive action, we do not
+        * allow an ftrace_ops to set all functions to a new hash.
+        */
+       if (!new_hash || !old_hash)
+               return -EINVAL;
+
+       /* Update rec->flags */
+       do_for_each_ftrace_rec(pg, rec) {
+               /* We need to update only differences of filter_hash */
+               in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+               in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+               if (in_old == in_new)
+                       continue;
+
+               if (in_new) {
+                       /* A new entry must ensure no other ops is already using it */
+                       if (rec->flags & FTRACE_FL_IPMODIFY)
+                               goto rollback;
+                       rec->flags |= FTRACE_FL_IPMODIFY;
+               } else /* Removed entry */
+                       rec->flags &= ~FTRACE_FL_IPMODIFY;
+       } while_for_each_ftrace_rec();
+
+       return 0;
+
+rollback:
+       end = rec;
+
+       /* Roll back what we did above */
+       do_for_each_ftrace_rec(pg, rec) {
+               if (rec == end)
+                       goto err_out;
+
+               in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+               in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+               if (in_old == in_new)
+                       continue;
+
+               if (in_new)
+                       rec->flags &= ~FTRACE_FL_IPMODIFY;
+               else
+                       rec->flags |= FTRACE_FL_IPMODIFY;
+       } while_for_each_ftrace_rec();
+
+err_out:
+       return -EBUSY;
+}
+
+static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
+{
+       struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(hash))
+               hash = NULL;
+
+       return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
+}
+
+/* Disabling always succeeds */
+static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
+{
+       struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(hash))
+               hash = NULL;
+
+       __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
+}
+
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+                                      struct ftrace_hash *new_hash)
+{
+       struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
+
+       if (ftrace_hash_empty(old_hash))
+               old_hash = NULL;
+
+       if (ftrace_hash_empty(new_hash))
+               new_hash = NULL;
+
+       return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
        int i;
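The IPMODIFY bookkeeping added above ensures that at most one ftrace_ops that rewrites the instruction pointer can attach to any given function. A heavily hedged sketch of what such a user might look like; every name below is hypothetical, regs->ip is the x86 field name, and live-patching-style code is the intended kind of consumer:

	#include <linux/ftrace.h>
	#include <linux/ptrace.h>

	static void my_fixed_up_func(void)
	{
		/* hypothetical replacement body */
	}

	static unsigned long target_ip;	/* address of the function being replaced */

	static void my_ipmodify_handler(unsigned long ip, unsigned long parent_ip,
					struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* divert execution to the replacement instead of the original */
		regs->ip = (unsigned long)my_fixed_up_func;
	}

	static struct ftrace_ops my_ipmodify_ops = {
		.func	= my_ipmodify_handler,
		.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
	};

	static int __init my_patch_init(void)
	{
		int ret;

		/* an IPMODIFY ops must use a filter hash; a notrace hash is rejected */
		ret = ftrace_set_filter_ip(&my_ipmodify_ops, target_ip, 0, 0);
		if (ret)
			return ret;

		/* fails with -EBUSY if another IPMODIFY ops already owns target_ip */
		return register_ftrace_function(&my_ipmodify_ops);
	}
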
@@ -1734,10 +1903,13 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+
 /**
  * ftrace_bug - report and shutdown function tracer
  * @failed: The failed type (EFAULT, EINVAL, EPERM)
- * @ip: The address that failed
+ * @rec: The record that failed
  *
  * The arch code that enables or disables the function tracing
  * can call ftrace_bug() when it has detected a problem in
@@ -1746,8 +1918,10 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
  * EINVAL - if what is read at @ip is not what was expected
  * EPERM - if the problem happens on writing to the @ip address
  */
-void ftrace_bug(int failed, unsigned long ip)
+void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
+       unsigned long ip = rec ? rec->ip : 0;
+
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
@@ -1759,7 +1933,7 @@ void ftrace_bug(int failed, unsigned long ip)
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
-               printk(KERN_CONT "\n");
+               pr_cont("\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
@@ -1771,6 +1945,24 @@ void ftrace_bug(int failed, unsigned long ip)
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
+       if (rec) {
+               struct ftrace_ops *ops = NULL;
+
+               pr_info("ftrace record flags: %lx\n", rec->flags);
+               pr_cont(" (%ld)%s", ftrace_rec_count(rec),
+                       rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+               if (rec->flags & FTRACE_FL_TRAMP_EN) {
+                       ops = ftrace_find_tramp_ops_any(rec);
+                       if (ops)
+                               pr_cont("\ttramp: %pS",
+                                       (void *)ops->trampoline);
+                       else
+                               pr_cont("\ttramp: ERROR!");
+
+               }
+               ip = ftrace_get_addr_curr(rec);
+               pr_cont(" expected tramp: %lx\n", ip);
+       }
 }
 
 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
@@ -2093,7 +2285,7 @@ void __weak ftrace_replace_code(int enable)
        do_for_each_ftrace_rec(pg, rec) {
                failed = __ftrace_replace_code(rec, enable);
                if (failed) {
-                       ftrace_bug(failed, rec->ip);
+                       ftrace_bug(failed, rec);
                        /* Stop processing */
                        return;
                }
@@ -2175,17 +2367,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
 static int
 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
-       unsigned long ip;
        int ret;
 
-       ip = rec->ip;
-
        if (unlikely(ftrace_disabled))
                return 0;
 
        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
-               ftrace_bug(ret, ip);
+               ftrace_bug(ret, rec);
                return 0;
        }
        return 1;
@@ -2320,6 +2509,10 @@ static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
        free_percpu(ops->disabled);
@@ -2369,6 +2562,15 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
         */
        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
 
+       ret = ftrace_hash_ipmodify_enable(ops);
+       if (ret < 0) {
+               /* Rollback registration process */
+               __unregister_ftrace_function(ops);
+               ftrace_start_up--;
+               ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+               return ret;
+       }
+
        ftrace_hash_rec_enable(ops, 1);
 
        ftrace_startup_enable(command);
@@ -2397,6 +2599,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
         */
        WARN_ON_ONCE(ftrace_start_up < 0);
 
+       /* Disabling ipmodify never fails */
+       ftrace_hash_ipmodify_disable(ops);
        ftrace_hash_rec_disable(ops, 1);
 
        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
@@ -2471,6 +2675,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
                schedule_on_each_cpu(ftrace_sync);
 
+               arch_ftrace_trampoline_free(ops);
+
                if (ops->flags & FTRACE_OPS_FL_CONTROL)
                        control_ops_free(ops);
        }
@@ -2623,7 +2829,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                        if (ftrace_start_up && cnt) {
                                int failed = __ftrace_replace_code(p, 1);
                                if (failed)
-                                       ftrace_bug(failed, p->ip);
+                                       ftrace_bug(failed, p);
                        }
                }
        }
@@ -2948,6 +3154,22 @@ static void t_stop(struct seq_file *m, void *p)
        mutex_unlock(&ftrace_lock);
 }
 
+void * __weak
+arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+       return NULL;
+}
+
+static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
+                               struct dyn_ftrace *rec)
+{
+       void *ptr;
+
+       ptr = arch_ftrace_trampoline_func(ops, rec);
+       if (ptr)
+               seq_printf(m, " ->%pS", ptr);
+}
+
 static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
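arch_ftrace_trampoline_func() is a new weak hook that lets an architecture report which C function a record's trampoline will actually call, which add_trampoline_func() then prints as " ->%pS" in the enabled_functions output. A deliberately trivial illustration of the hook's contract; the real x86 override is more involved, so treat this only as a sketch:

	#include <linux/ftrace.h>

	/* hypothetical arch override of the __weak stub above */
	void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
	{
		/* report the callback this ops' trampoline was built to call, if known */
		return ops ? (void *)ops->func : NULL;
	}
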
@@ -2958,9 +3180,9 @@ static int t_show(struct seq_file *m, void *v)
 
        if (iter->flags & FTRACE_ITER_PRINTALL) {
                if (iter->flags & FTRACE_ITER_NOTRACE)
-                       seq_printf(m, "#### no functions disabled ####\n");
+                       seq_puts(m, "#### no functions disabled ####\n");
                else
-                       seq_printf(m, "#### all functions enabled ####\n");
+                       seq_puts(m, "#### all functions enabled ####\n");
                return 0;
        }
 
@@ -2971,22 +3193,25 @@ static int t_show(struct seq_file *m, void *v)
 
        seq_printf(m, "%ps", (void *)rec->ip);
        if (iter->flags & FTRACE_ITER_ENABLED) {
-               seq_printf(m, " (%ld)%s",
+               struct ftrace_ops *ops = NULL;
+
+               seq_printf(m, " (%ld)%s%s",
                           ftrace_rec_count(rec),
-                          rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+                          rec->flags & FTRACE_FL_REGS ? " R" : "  ",
+                          rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
                if (rec->flags & FTRACE_FL_TRAMP_EN) {
-                       struct ftrace_ops *ops;
-
                        ops = ftrace_find_tramp_ops_any(rec);
                        if (ops)
                                seq_printf(m, "\ttramp: %pS",
                                           (void *)ops->trampoline);
                        else
-                               seq_printf(m, "\ttramp: ERROR!");
+                               seq_puts(m, "\ttramp: ERROR!");
+
                }
+               add_trampoline_func(m, ops, rec);
        }
 
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
 
        return 0;
 }
@@ -3020,9 +3245,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
 
-       if (unlikely(ftrace_disabled))
-               return -ENODEV;
-
        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
        if (iter) {
                iter->pg = ftrace_pages_start;
@@ -3975,6 +4197,9 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
+static unsigned long save_global_trampoline;
+static unsigned long save_global_flags;
+
 static int __init set_graph_function(char *str)
 {
        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -4183,9 +4408,9 @@ static int g_show(struct seq_file *m, void *v)
                struct ftrace_graph_data *fgd = m->private;
 
                if (fgd->table == ftrace_graph_funcs)
-                       seq_printf(m, "#### all functions enabled ####\n");
+                       seq_puts(m, "#### all functions enabled ####\n");
                else
-                       seq_printf(m, "#### no functions disabled ####\n");
+                       seq_puts(m, "#### no functions disabled ####\n");
                return 0;
        }
 
@@ -4696,6 +4921,32 @@ void __init ftrace_init(void)
        ftrace_disabled = 1;
 }
 
+/* Do nothing if arch does not support this */
+void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+
+/*
+ * Currently there's no safe way to free a trampoline when the kernel
+ * is configured with PREEMPT. That is because a task could be preempted
+ * while it is executing on the trampoline, and it may stay preempted for a
+ * long time depending on the system load; currently there's no way to know
+ * when it will be off the trampoline. If the trampoline is freed
+ * too early, when the task runs again, it will be executing on freed
+ * memory and crash.
+ */
+#ifdef CONFIG_PREEMPT
+       /* Currently, only non-dynamic ops can have a trampoline */
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+               return;
+#endif
+
+       arch_ftrace_update_trampoline(ops);
+}
+
 #else
 
 static struct ftrace_ops global_ops = {
@@ -4738,6 +4989,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
        return 1;
 }
 
+static void ftrace_update_trampoline(struct ftrace_ops *ops)
+{
+}
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 __init void ftrace_init_global_array_ops(struct trace_array *tr)
@@ -5075,12 +5330,12 @@ static int fpid_show(struct seq_file *m, void *v)
        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
 
        if (v == (void *)1) {
-               seq_printf(m, "no pid\n");
+               seq_puts(m, "no pid\n");
                return 0;
        }
 
        if (fpid->pid == ftrace_swapper_pid)
-               seq_printf(m, "swapper tasks\n");
+               seq_puts(m, "swapper tasks\n");
        else
                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
 
@@ -5293,6 +5548,7 @@ static struct ftrace_ops graph_ops = {
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
+       /* trampoline_size is only needed for dynamically allocated tramps */
 #endif
        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
 };
@@ -5522,7 +5778,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        update_function_graph_func();
 
        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-
 out:
        mutex_unlock(&ftrace_lock);
        return ret;
@@ -5543,6 +5798,17 @@ void unregister_ftrace_graph(void)
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /*
+        * Function graph does not allocate the trampoline, but
+        * other global_ops do. We need to reset the ALLOC_TRAMP flag
+        * if one was used.
+        */
+       global_ops.trampoline = save_global_trampoline;
+       if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
+               global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+#endif
+
  out:
        mutex_unlock(&ftrace_lock);
 }