bpf: Add struct bpf_ksym
author     Jiri Olsa <jolsa@kernel.org>
Thu, 12 Mar 2020 19:55:58 +0000 (20:55 +0100)
committer  Alexei Starovoitov <ast@kernel.org>
Fri, 13 Mar 2020 19:49:51 +0000 (12:49 -0700)
Adding a 'struct bpf_ksym' object that will carry the
kallsyms information for a bpf symbol. To begin with it
holds just the start and end address. It will be used by
the bpf_prog, bpf_trampoline and bpf_dispatcher objects.
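
Purely as an illustration (this helper is hypothetical and not part of
the patch), code holding a bpf_ksym can treat the range as half-open,
the same way the latch-tree comparator in the core.c hunks below does:

	static bool bpf_ksym_contains(const struct bpf_ksym *ksym,
				      unsigned long addr)
	{
		/* the range is half-open: [start, end) */
		return addr >= ksym->start && addr < ksym->end;
	}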

The symbol_start/symbol_end values were originally used
to sort bpf_prog objects. For the address displayed in
/proc/kallsyms we use the prog->bpf_func value.

I'm using the bpf_func value as the program symbol start
instead of symbol_start, because it makes no difference
for sorting bpf_prog objects and it can be used directly
as the address to display in /proc/kallsyms.
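
To make the choice concrete, a worked example with made-up values
(assuming a 4K PAGE_SIZE): if the JIT image header sits at
hdr = 0xffff000010000000, hdr->pages = 1 and
prog->bpf_func = 0xffff000010000030, then bpf_prog_ksym_set_addr()
produces:

	ksym.start = 0xffff000010000030	/* prog->bpf_func, shown in /proc/kallsyms */
	ksym.end   = 0xffff000010001000	/* hdr + pages * PAGE_SIZE */

so the displayed address is bpf_func itself and the reported symbol
size comes out as 0xfd0 bytes.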

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20200312195610.346362-4-jolsa@kernel.org
include/linux/bpf.h
kernel/bpf/core.c

index fe1f8b0..6ca3d5c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -471,6 +471,11 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 u64 notrace __bpf_prog_enter(void);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
 
+struct bpf_ksym {
+       unsigned long            start;
+       unsigned long            end;
+};
+
 enum bpf_tramp_prog_type {
        BPF_TRAMP_FENTRY,
        BPF_TRAMP_FEXIT,
@@ -653,6 +658,7 @@ struct bpf_prog_aux {
        u32 size_poke_tab;
        struct latch_tree_node ksym_tnode;
        struct list_head ksym_lnode;
+       struct bpf_ksym ksym;
        const struct bpf_prog_ops *ops;
        struct bpf_map **used_maps;
        struct bpf_prog *prog;
index 0f9ca46..e587d63 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -523,18 +523,16 @@ int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 int bpf_jit_harden   __read_mostly;
 long bpf_jit_limit   __read_mostly;
 
-static __always_inline void
-bpf_get_prog_addr_region(const struct bpf_prog *prog,
-                        unsigned long *symbol_start,
-                        unsigned long *symbol_end)
+static void
+bpf_prog_ksym_set_addr(struct bpf_prog *prog)
 {
        const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
        unsigned long addr = (unsigned long)hdr;
 
        WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 
-       *symbol_start = addr;
-       *symbol_end   = addr + hdr->pages * PAGE_SIZE;
+       prog->aux->ksym.start = (unsigned long) prog->bpf_func;
+       prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
 }
 
 void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
@@ -575,13 +573,10 @@ void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 static __always_inline unsigned long
 bpf_get_prog_addr_start(struct latch_tree_node *n)
 {
-       unsigned long symbol_start, symbol_end;
        const struct bpf_prog_aux *aux;
 
        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
-       bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
-
-       return symbol_start;
+       return aux->ksym.start;
 }
 
 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
@@ -593,15 +588,13 @@ static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 {
        unsigned long val = (unsigned long)key;
-       unsigned long symbol_start, symbol_end;
        const struct bpf_prog_aux *aux;
 
        aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
-       bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 
-       if (val < symbol_start)
+       if (val < aux->ksym.start)
                return -1;
-       if (val >= symbol_end)
+       if (val >= aux->ksym.end)
                return  1;
 
        return 0;
@@ -649,6 +642,8 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
            !capable(CAP_SYS_ADMIN))
                return;
 
+       bpf_prog_ksym_set_addr(fp);
+
        spin_lock_bh(&bpf_lock);
        bpf_prog_ksym_node_add(fp->aux);
        spin_unlock_bh(&bpf_lock);
@@ -677,14 +672,15 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
                                 unsigned long *off, char *sym)
 {
-       unsigned long symbol_start, symbol_end;
        struct bpf_prog *prog;
        char *ret = NULL;
 
        rcu_read_lock();
        prog = bpf_prog_kallsyms_find(addr);
        if (prog) {
-               bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
+               unsigned long symbol_start = prog->aux->ksym.start;
+               unsigned long symbol_end = prog->aux->ksym.end;
+
                bpf_get_prog_name(prog, sym);
 
                ret = sym;
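
For context, a sketch of how a kallsyms-style consumer would typically
report size and offset from this range (illustrative only; the rest of
the function is not shown in the hunk above):

	if (size)
		*size = symbol_end - symbol_start;
	if (off)
		*off = addr - symbol_start;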