bpf: Eliminate rlimit-based memory accounting for cgroup storage maps
author: Roman Gushchin <guro@fb.com>
Tue, 1 Dec 2020 21:58:47 +0000 (13:58 -0800)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 3 Dec 2020 02:32:46 +0000 (18:32 -0800)
Do not use rlimit-based memory accounting for cgroup storage maps.
It has been replaced with the memcg-based memory accounting.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20201201215900.3569844-22-guro@fb.com
kernel/bpf/local_storage.c

index 74dcee8..2d4f9ac 100644 (file)
@@ -287,8 +287,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 {
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_cgroup_storage_map *map;
-       struct bpf_map_memory mem;
-       int ret;
 
        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
            attr->key_size != sizeof(__u64))
@@ -308,18 +306,10 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
                /* max_entries is not used and enforced to be 0 */
                return ERR_PTR(-EINVAL);
 
-       ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
-       if (ret < 0)
-               return ERR_PTR(ret);
-
        map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
                           __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
-       if (!map) {
-               bpf_map_charge_finish(&mem);
+       if (!map)
                return ERR_PTR(-ENOMEM);
-       }
-
-       bpf_map_charge_move(&map->map.memory, &mem);
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&map->map, attr);
@@ -508,9 +498,6 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
 
        size = bpf_cgroup_storage_calculate_size(map, &pages);
 
-       if (bpf_map_charge_memlock(map, pages))
-               return ERR_PTR(-EPERM);
-
        storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
                                       gfp, map->numa_node);
        if (!storage)
@@ -533,7 +520,6 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
        return storage;
 
 enomem:
-       bpf_map_uncharge_memlock(map, pages);
        kfree(storage);
        return ERR_PTR(-ENOMEM);
 }
@@ -560,16 +546,11 @@ void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
 {
        enum bpf_cgroup_storage_type stype;
        struct bpf_map *map;
-       u32 pages;
 
        if (!storage)
                return;
 
        map = &storage->map->map;
-
-       bpf_cgroup_storage_calculate_size(map, &pages);
-       bpf_map_uncharge_memlock(map, pages);
-
        stype = cgroup_storage_type(map);
        if (stype == BPF_CGROUP_STORAGE_SHARED)
                call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);