kprobes: Add symbols for kprobe insn pages
author    Adrian Hunter <adrian.hunter@intel.com>
          Thu, 28 May 2020 08:00:58 +0000 (11:00 +0300)
committer Peter Zijlstra <peterz@infradead.org>
          Mon, 15 Jun 2020 12:09:48 +0000 (14:09 +0200)

Symbols are needed for tools to describe instruction addresses. Pages
allocated for kprobes' purposes need symbols created for them. Add such
symbols and make them visible via /proc/kallsyms.

Note: kprobe insn pages are not used if ftrace is configured. To see the
effect of this patch, the kernel must be configured with:

# CONFIG_FUNCTION_TRACER is not set
CONFIG_KPROBES=y

and for optimised kprobes:

CONFIG_OPTPROBES=y

Example on x86:

# perf probe __schedule
Added new event:
  probe:__schedule     (on __schedule)
# cat /proc/kallsyms | grep '\[__builtin__kprobes\]'
ffffffffc00d4000 t kprobe_insn_page     [__builtin__kprobes]
ffffffffc00d6000 t kprobe_optinsn_page  [__builtin__kprobes]

Note: This patch adds "__builtin__kprobes" as the module name shown in
/proc/kallsyms for the symbols of pages allocated for kprobes' purposes,
even though "__builtin__kprobes" is not a real module.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Link: https://lkml.kernel.org/r/20200528080058.20230-1-adrian.hunter@intel.com
include/linux/kprobes.h
kernel/kallsyms.c
kernel/kprobes.c

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 594265b..13fc58a 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -242,6 +242,7 @@ struct kprobe_insn_cache {
        struct mutex mutex;
        void *(*alloc)(void);   /* allocate insn page */
        void (*free)(void *);   /* free insn page */
+       const char *sym;        /* symbol for insn pages */
        struct list_head pages; /* list of kprobe_insn_page */
        size_t insn_size;       /* size of instruction slot */
        int nr_garbage;
@@ -272,6 +273,10 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr)   \
 {                                                                      \
        return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);     \
 }
+#define KPROBE_INSN_PAGE_SYM           "kprobe_insn_page"
+#define KPROBE_OPTINSN_PAGE_SYM                "kprobe_optinsn_page"
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+                            unsigned long *value, char *type, char *sym);
 #else /* __ARCH_WANT_KPROBES_INSN_SLOT */
 #define DEFINE_INSN_CACHE_OPS(__name)                                  \
 static inline bool is_kprobe_##__name##_slot(unsigned long addr)       \
@@ -373,6 +378,11 @@ void dump_kprobe(struct kprobe *kp);
 void *alloc_insn_page(void);
 void free_insn_page(void *page);
 
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                      char *sym);
+
+int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+                           char *type, char *sym);
 #else /* !CONFIG_KPROBES: */
 
 static inline int kprobes_built_in(void)
@@ -435,6 +445,11 @@ static inline bool within_kprobe_blacklist(unsigned long addr)
 {
        return true;
 }
+static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
+                                    char *type, char *sym)
+{
+       return -ERANGE;
+}
 #endif /* CONFIG_KPROBES */
 static inline int disable_kretprobe(struct kretprobe *rp)
 {
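
The declarations above form a small enumeration interface: kallsyms asks for
symbol 0, 1, 2, ... and stops at the first negative return (-ERANGE once the
index runs past the last insn page). A rough kernel-side sketch of that
contract, using a hypothetical debug helper that is not part of the patch:

#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/printk.h>

/* Hypothetical helper: dump every kprobe insn page symbol. */
static void __maybe_unused dump_kprobe_insn_syms(void)
{
        unsigned long value;
        unsigned int i = 0;
        char name[KSYM_NAME_LEN];
        char type;

        /* kprobe_get_kallsym() returns 0 per symbol, -ERANGE past the end. */
        while (!kprobe_get_kallsym(i++, &value, &type, name))
                pr_info("%lx %c %s\n", value, type, name);
}
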
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 16c8c60..c6cc293 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/filter.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/compiler.h>
 
 /*
@@ -437,6 +438,7 @@ struct kallsym_iter {
        loff_t pos_arch_end;
        loff_t pos_mod_end;
        loff_t pos_ftrace_mod_end;
+       loff_t pos_bpf_end;
        unsigned long value;
        unsigned int nameoff; /* If iterating in core kernel symbols. */
        char type;
@@ -496,11 +498,33 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
 
 static int get_ksymbol_bpf(struct kallsym_iter *iter)
 {
+       int ret;
+
        strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
        iter->exported = 0;
-       return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
-                              &iter->value, &iter->type,
-                              iter->name) < 0 ? 0 : 1;
+       ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
+                             &iter->value, &iter->type,
+                             iter->name);
+       if (ret < 0) {
+               iter->pos_bpf_end = iter->pos;
+               return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * This uses "__builtin__kprobes" as a module name for symbols for pages
+ * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
+ * module.
+ */
+static int get_ksymbol_kprobe(struct kallsym_iter *iter)
+{
+       strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
+       iter->exported = 0;
+       return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
+                                 &iter->value, &iter->type,
+                                 iter->name) < 0 ? 0 : 1;
 }
 
 /* Returns space to next name. */
@@ -527,6 +551,7 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
                iter->pos_arch_end = 0;
                iter->pos_mod_end = 0;
                iter->pos_ftrace_mod_end = 0;
+               iter->pos_bpf_end = 0;
        }
 }
 
@@ -551,7 +576,11 @@ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
            get_ksymbol_ftrace_mod(iter))
                return 1;
 
-       return get_ksymbol_bpf(iter);
+       if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
+           get_ksymbol_bpf(iter))
+               return 1;
+
+       return get_ksymbol_kprobe(iter);
 }
 
 /* Returns false if pos at or past end of file. */
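
For reference (illustration only, outside the patch): each provider in
update_iter_mod() is asked for symbol (pos - end of the previous provider),
and records its end position the first time it runs out, so the next
provider is indexed from zero. A toy standalone model of that chaining, with
made-up providers standing in for bpf and kprobes:

#include <stdio.h>

#define END (-1)

/* Toy providers: return a "symbol" value, or END past their last symbol. */
static int bpf_sym(int idx)    { return idx < 2 ? 100 + idx : END; }
static int kprobe_sym(int idx) { return idx < 2 ? 200 + idx : END; }

int main(void)
{
        int pos_bpf_end = 0;    /* mirrors iter->pos_bpf_end */
        int pos, val;

        for (pos = 0; ; pos++) {
                if (!pos_bpf_end || pos < pos_bpf_end) {
                        val = bpf_sym(pos);
                        if (val != END) {
                                printf("pos %d -> bpf symbol %d\n", pos, val);
                                continue;
                        }
                        pos_bpf_end = pos;      /* bpf exhausted at this pos */
                }
                val = kprobe_sym(pos - pos_bpf_end);
                if (val == END)
                        break;
                printf("pos %d -> kprobe symbol %d\n", pos, val);
        }
        return 0;
}
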
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 50cd84f..058c0be 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -118,6 +118,7 @@ struct kprobe_insn_cache kprobe_insn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
+       .sym = KPROBE_INSN_PAGE_SYM,
        .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
        .nr_garbage = 0,
@@ -290,12 +291,34 @@ bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
        return ret;
 }
 
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+                            unsigned long *value, char *type, char *sym)
+{
+       struct kprobe_insn_page *kip;
+       int ret = -ERANGE;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(kip, &c->pages, list) {
+               if ((*symnum)--)
+                       continue;
+               strlcpy(sym, c->sym, KSYM_NAME_LEN);
+               *type = 't';
+               *value = (unsigned long)kip->insns;
+               ret = 0;
+               break;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
 struct kprobe_insn_cache kprobe_optinsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
+       .sym = KPROBE_OPTINSN_PAGE_SYM,
        .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
        /* .insn_size is initialized later */
        .nr_garbage = 0,
@@ -2197,6 +2220,28 @@ static void kprobe_remove_ksym_blacklist(unsigned long entry)
        kprobe_remove_area_blacklist(entry, entry + 1);
 }
 
+int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+                                  char *type, char *sym)
+{
+       return -ERANGE;
+}
+
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                      char *sym)
+{
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
+       if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
+               return 0;
+#ifdef CONFIG_OPTPROBES
+       if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
+               return 0;
+#endif
+#endif
+       if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
+               return 0;
+       return -ERANGE;
+}
+
 int __init __weak arch_populate_kprobe_blacklist(void)
 {
        return 0;
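
No architecture overrides arch_kprobe_get_kallsym() in this patch; the weak
stub simply returns -ERANGE. Purely as a hypothetical sketch, an
architecture with one extra private insn page could hook in by following the
same *symnum convention as kprobe_cache_get_kallsym() (the page variable and
symbol name below are made up):

#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/string.h>

/* Hypothetical arch-private insn page (not in this patch). */
static void *arch_extra_insn_page;

int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
                            char *type, char *sym)
{
        /* No page to report: nothing to account for. */
        if (!arch_extra_insn_page)
                return -ERANGE;
        /* Consume one index for our single symbol; report it at index 0. */
        if ((*symnum)--)
                return -ERANGE;

        strlcpy(sym, "kprobe_arch_insn_page", KSYM_NAME_LEN);
        *type = 't';
        *value = (unsigned long)arch_extra_insn_page;
        return 0;
}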