bpf: Eliminate rlimit-based memory accounting for arraymap maps
author: Roman Gushchin <guro@fb.com>
Tue, 1 Dec 2020 21:58:44 +0000 (13:58 -0800)
committer: Alexei Starovoitov <ast@kernel.org>
Thu, 3 Dec 2020 02:32:46 +0000 (18:32 -0800)
Do not use rlimit-based memory accounting for arraymap maps.
It has been replaced with the memcg-based memory accounting.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20201201215900.3569844-19-guro@fb.com
kernel/bpf/arraymap.c

index d837e06..1f84533 100644 (file)
@@ -81,11 +81,10 @@ int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-       int ret, numa_node = bpf_map_attr_numa_node(attr);
+       int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool bypass_spec_v1 = bpf_bypass_spec_v1();
-       u64 cost, array_size, mask64;
-       struct bpf_map_memory mem;
+       u64 array_size, mask64;
        struct bpf_array *array;
 
        elem_size = round_up(attr->value_size, 8);
@@ -126,44 +125,29 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
                }
        }
 
-       /* make sure there is no u32 overflow later in round_up() */
-       cost = array_size;
-       if (percpu)
-               cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-
-       ret = bpf_map_charge_init(&mem, cost);
-       if (ret < 0)
-               return ERR_PTR(ret);
-
        /* allocate all map elements and zero-initialize them */
        if (attr->map_flags & BPF_F_MMAPABLE) {
                void *data;
 
                /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
                data = bpf_map_area_mmapable_alloc(array_size, numa_node);
-               if (!data) {
-                       bpf_map_charge_finish(&mem);
+               if (!data)
                        return ERR_PTR(-ENOMEM);
-               }
                array = data + PAGE_ALIGN(sizeof(struct bpf_array))
                        - offsetof(struct bpf_array, value);
        } else {
                array = bpf_map_area_alloc(array_size, numa_node);
        }
-       if (!array) {
-               bpf_map_charge_finish(&mem);
+       if (!array)
                return ERR_PTR(-ENOMEM);
-       }
        array->index_mask = index_mask;
        array->map.bypass_spec_v1 = bypass_spec_v1;
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
-       bpf_map_charge_move(&array->map.memory, &mem);
        array->elem_size = elem_size;
 
        if (percpu && bpf_array_alloc_percpu(array)) {
-               bpf_map_charge_finish(&array->map.memory);
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }