kprobes: add perf ksymbol events and kallsyms support for kprobe insn pages
[linux-2.6-microblaze.git] / kernel / kprobes.c
index 2e97feb..287b263 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/jump_label.h>
+#include <linux/perf_event.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -123,6 +124,7 @@ struct kprobe_insn_cache kprobe_insn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
+       .sym = KPROBE_INSN_PAGE_SYM,
        .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
        .nr_garbage = 0,
@@ -188,6 +190,10 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
        kip->cache = c;
        list_add_rcu(&kip->list, &c->pages);
        slot = kip->insns;
+
+       /* Record the perf ksymbol register event after adding the page */
+       perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
+                          PAGE_SIZE, false, c->sym);
 out:
        mutex_unlock(&c->mutex);
        return slot;
@@ -206,6 +212,13 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kip->list)) {
+                       /*
+                        * Record perf ksymbol unregister event before removing
+                        * the page.
+                        */
+                       perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+                                          (unsigned long)kip->insns, PAGE_SIZE, true,
+                                          kip->cache->sym);
                        list_del_rcu(&kip->list);
                        synchronize_rcu();
                        kip->cache->free(kip->insns);
@@ -295,12 +308,34 @@ bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
        return ret;
 }
 
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+                            unsigned long *value, char *type, char *sym)
+{
+       struct kprobe_insn_page *kip;
+       int ret = -ERANGE;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(kip, &c->pages, list) {
+               if ((*symnum)--)
+                       continue;
+               strlcpy(sym, c->sym, KSYM_NAME_LEN);
+               *type = 't';
+               *value = (unsigned long)kip->insns;
+               ret = 0;
+               break;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
 struct kprobe_insn_cache kprobe_optinsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
+       .sym = KPROBE_OPTINSN_PAGE_SYM,
        .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
        /* .insn_size is initialized later */
        .nr_garbage = 0,
@@ -563,8 +598,6 @@ static void kprobe_optimizer(struct work_struct *work)
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
        mutex_lock(&text_mutex);
-       /* Lock modules while optimizing kprobes */
-       mutex_lock(&module_mutex);
 
        /*
         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -589,7 +622,6 @@ static void kprobe_optimizer(struct work_struct *work)
        /* Step 4: Free cleaned kprobes after quiesence period */
        do_free_cleaned_kprobes();
 
-       mutex_unlock(&module_mutex);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
 
@@ -1079,9 +1111,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
                ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
 }
 #else  /* !CONFIG_KPROBES_ON_FTRACE */
-#define prepare_kprobe(p)      arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)   (-ENODEV)
-#define disarm_kprobe_ftrace(p)        (-ENODEV)
+static inline int prepare_kprobe(struct kprobe *p)
+{
+       return arch_prepare_kprobe(p);
+}
+
+static inline int arm_kprobe_ftrace(struct kprobe *p)
+{
+       return -ENODEV;
+}
+
+static inline int disarm_kprobe_ftrace(struct kprobe *p)
+{
+       return -ENODEV;
+}
 #endif
 
 /* Arm a kprobe with text_mutex */
@@ -2113,6 +2156,13 @@ static void kill_kprobe(struct kprobe *p)
         * the original probed function (which will be freed soon) any more.
         */
        arch_remove_kprobe(p);
+
+       /*
+        * The module is going away. We should disarm the kprobe which
+        * is using ftrace.
+        */
+       if (kprobe_ftrace(p))
+               disarm_kprobe_ftrace(p);
 }
 
 /* Disable one kprobe */
@@ -2232,6 +2282,28 @@ static void kprobe_remove_ksym_blacklist(unsigned long entry)
        kprobe_remove_area_blacklist(entry, entry + 1);
 }
 
+int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+                                  char *type, char *sym)
+{
+       return -ERANGE;
+}
+
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                      char *sym)
+{
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
+       if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
+               return 0;
+#ifdef CONFIG_OPTPROBES
+       if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
+               return 0;
+#endif
+#endif
+       if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
+               return 0;
+       return -ERANGE;
+}
+
 int __init __weak arch_populate_kprobe_blacklist(void)
 {
        return 0;