bpf: allocate cgroup storage entries on attaching bpf programs
authorRoman Gushchin <guro@fb.com>
Thu, 2 Aug 2018 21:27:20 +0000 (14:27 -0700)
committerDaniel Borkmann <daniel@iogearbox.net>
Thu, 2 Aug 2018 22:47:32 +0000 (00:47 +0200)
If a bpf program is using cgroup local storage, allocate
a bpf_cgroup_storage structure automatically on attaching the program
to a cgroup and save the pointer into the corresponding bpf_prog_list
entry.
Analogously, release the cgroup local storage when the bpf
program is detached.

Signed-off-by: Roman Gushchin <guro@fb.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
include/linux/bpf-cgroup.h
kernel/bpf/cgroup.c

index 9a144dd..f91b0f8 100644 (file)
@@ -43,6 +43,7 @@ struct bpf_cgroup_storage {
 struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
+       struct bpf_cgroup_storage *storage;
 };
 
 struct bpf_prog_array;
index badabb0..935274c 100644 (file)
@@ -34,6 +34,8 @@ void cgroup_bpf_put(struct cgroup *cgrp)
                list_for_each_entry_safe(pl, tmp, progs, node) {
                        list_del(&pl->node);
                        bpf_prog_put(pl->prog);
+                       bpf_cgroup_storage_unlink(pl->storage);
+                       bpf_cgroup_storage_free(pl->storage);
                        kfree(pl);
                        static_branch_dec(&cgroup_bpf_enabled_key);
                }
@@ -188,6 +190,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 {
        struct list_head *progs = &cgrp->bpf.progs[type];
        struct bpf_prog *old_prog = NULL;
+       struct bpf_cgroup_storage *storage, *old_storage = NULL;
        struct cgroup_subsys_state *css;
        struct bpf_prog_list *pl;
        bool pl_was_allocated;
@@ -210,31 +213,47 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
        if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
                return -E2BIG;
 
+       storage = bpf_cgroup_storage_alloc(prog);
+       if (IS_ERR(storage))
+               return -ENOMEM;
+
        if (flags & BPF_F_ALLOW_MULTI) {
-               list_for_each_entry(pl, progs, node)
-                       if (pl->prog == prog)
+               list_for_each_entry(pl, progs, node) {
+                       if (pl->prog == prog) {
                                /* disallow attaching the same prog twice */
+                               bpf_cgroup_storage_free(storage);
                                return -EINVAL;
+                       }
+               }
 
                pl = kmalloc(sizeof(*pl), GFP_KERNEL);
-               if (!pl)
+               if (!pl) {
+                       bpf_cgroup_storage_free(storage);
                        return -ENOMEM;
+               }
+
                pl_was_allocated = true;
                pl->prog = prog;
+               pl->storage = storage;
                list_add_tail(&pl->node, progs);
        } else {
                if (list_empty(progs)) {
                        pl = kmalloc(sizeof(*pl), GFP_KERNEL);
-                       if (!pl)
+                       if (!pl) {
+                               bpf_cgroup_storage_free(storage);
                                return -ENOMEM;
+                       }
                        pl_was_allocated = true;
                        list_add_tail(&pl->node, progs);
                } else {
                        pl = list_first_entry(progs, typeof(*pl), node);
                        old_prog = pl->prog;
+                       old_storage = pl->storage;
+                       bpf_cgroup_storage_unlink(old_storage);
                        pl_was_allocated = false;
                }
                pl->prog = prog;
+               pl->storage = storage;
        }
 
        cgrp->bpf.flags[type] = flags;
@@ -257,10 +276,13 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
        }
 
        static_branch_inc(&cgroup_bpf_enabled_key);
+       if (old_storage)
+               bpf_cgroup_storage_free(old_storage);
        if (old_prog) {
                bpf_prog_put(old_prog);
                static_branch_dec(&cgroup_bpf_enabled_key);
        }
+       bpf_cgroup_storage_link(storage, cgrp, type);
        return 0;
 
 cleanup:
@@ -276,6 +298,9 @@ cleanup:
 
        /* and cleanup the prog list */
        pl->prog = old_prog;
+       bpf_cgroup_storage_free(pl->storage);
+       pl->storage = old_storage;
+       bpf_cgroup_storage_link(old_storage, cgrp, type);
        if (pl_was_allocated) {
                list_del(&pl->node);
                kfree(pl);
@@ -356,6 +381,8 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 
        /* now can actually delete it from this cgroup list */
        list_del(&pl->node);
+       bpf_cgroup_storage_unlink(pl->storage);
+       bpf_cgroup_storage_free(pl->storage);
        kfree(pl);
        if (list_empty(progs))
                /* last program was detached, reset flags to zero */