diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index c61a23b..7473136 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -84,8 +84,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;
-       u64 cost;
-       int ret;
 
        if (!bpf_capable())
                return ERR_PTR(-EPERM);
@@ -97,7 +95,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
            attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);
 
-       cmap = kzalloc(sizeof(*cmap), GFP_USER);
+       cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
        if (!cmap)
                return ERR_PTR(-ENOMEM);
 
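
Note on the hunk above: OR-ing __GFP_ACCOUNT into the GFP_USER flags makes the slab
allocator charge this allocation to the memory cgroup of the task creating the map,
which is what replaces the rlimit-based charging removed in the next hunk. A minimal
sketch of the pattern (simplified kernel-style C; the helper name is illustrative,
not part of this patch):

    #include <linux/slab.h>
    #include <linux/gfp.h>

    /* Any kmalloc-family call can opt into memcg kmem accounting by
     * adding __GFP_ACCOUNT; the charge is dropped again on kfree().
     */
    static void *alloc_accounted(size_t size)
    {
        return kzalloc(size, GFP_USER | __GFP_ACCOUNT);
    }
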
@@ -109,26 +107,14 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
                goto free_cmap;
        }
 
-       /* make sure page count doesn't overflow */
-       cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-
-       /* Notice returns -EPERM on if map size is larger than memlock limit */
-       ret = bpf_map_charge_init(&cmap->map.memory, cost);
-       if (ret) {
-               err = ret;
-               goto free_cmap;
-       }
-
        /* Alloc array for possible remote "destination" CPUs */
        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map)
-               goto free_charge;
+               goto free_cmap;
 
        return &cmap->map;
-free_charge:
-       bpf_map_charge_finish(&cmap->map.memory);
 free_cmap:
        kfree(cmap);
        return ERR_PTR(err);
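
The block deleted above was the old rlimit-based accounting: the map's worst-case
size was pre-charged against the creator's RLIMIT_MEMLOCK at map creation time.
Roughly, the removed helper behaved like this (a simplified reconstruction of the
pre-removal code in kernel/bpf/syscall.c, shown here only for context):

    int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
    {
        u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
        struct user_struct *user;

        if (size >= U32_MAX - PAGE_SIZE)
            return -E2BIG;          /* page count would overflow u32 */

        user = get_current_user();
        if (atomic_long_add_return(pages, &user->locked_vm) >
            (rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT)) {
            atomic_long_sub(pages, &user->locked_vm);
            free_uid(user);
            return -EPERM;          /* over the caller's memlock limit */
        }

        mem->pages = pages;
        mem->user  = user;
        return 0;
    }

bpf_map_charge_finish() dropped the charge on the error path; with memcg accounting
both halves disappear, which is why the free_charge label can go away as well.
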
@@ -412,7 +398,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
 }
 
 static struct bpf_cpu_map_entry *
-__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
+__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
+                     u32 cpu)
 {
        int numa, err, i, fd = value->bpf_prog.fd;
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
@@ -422,13 +409,13 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
        /* Have map->numa_node, but choose node of redirect target CPU */
        numa = cpu_to_node(cpu);
 
-       rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
+       rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
        if (!rcpu)
                return NULL;
 
        /* Alloc percpu bulkq */
-       rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
-                                        sizeof(void *), gfp);
+       rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
+                                          sizeof(void *), gfp);
        if (!rcpu->bulkq)
                goto free_rcu;
 
@@ -438,7 +425,8 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
        }
 
        /* Alloc queue */
-       rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
+       rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
+                                          numa);
        if (!rcpu->queue)
                goto free_bulkq;
 
@@ -447,7 +435,7 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
                goto free_queue;
 
        rcpu->cpu    = cpu;
-       rcpu->map_id = map_id;
+       rcpu->map_id = map->id;
        rcpu->value.qsize  = value->qsize;
 
        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
@@ -455,7 +443,8 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
 
        /* Setup kthread */
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
-                                              "cpumap/%d/map:%d", cpu, map_id);
+                                              "cpumap/%d/map:%d", cpu,
+                                              map->id);
        if (IS_ERR(rcpu->kthread))
                goto free_prog;
 
@@ -571,7 +560,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                rcpu = NULL; /* Same as deleting */
        } else {
                /* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-               rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
+               rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
                if (!rcpu)
                        return -ENOMEM;
                rcpu->cmap = cmap;
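
Net effect for userspace: creating and populating a cpumap no longer consumes
RLIMIT_MEMLOCK; the memory is charged to the creator's cgroup (visible in
memory.current) instead. A sketch using the raw bpf(2) syscall (values are
illustrative; value_size 4 selects the qsize-only layout of struct bpf_cpumap_val):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_CPUMAP;
        attr.key_size    = 4;      /* CPU index */
        attr.value_size  = 4;      /* qsize only */
        attr.max_entries = 64;

        /* No setrlimit(RLIMIT_MEMLOCK, ...) dance is needed anymore;
         * the kernel charges the map to this task's memory cgroup.
         */
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr)) < 0;
    }
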