bpf: Drop always true do_idr_lock parameter to bpf_map_free_id
author	Tobias Klauser <tklauser@distanz.ch>
Thu, 2 Feb 2023 14:19:21 +0000 (15:19 +0100)
committer	Alexei Starovoitov <ast@kernel.org>
Fri, 3 Feb 2023 04:26:12 +0000 (20:26 -0800)
The do_idr_lock parameter to bpf_map_free_id was introduced by commit
bd5f5f4ecb78 ("bpf: Add BPF_MAP_GET_FD_BY_ID"). However, all callers have
passed do_idr_lock = true since commit 1e0bd5a091e5 ("bpf: Switch bpf_map
ref counter to atomic64_t so bpf_map_inc() never fails").

While at it, also inline __bpf_map_put into its only caller bpf_map_put,
now that do_idr_lock can be dropped from its signature.

Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
Link: https://lore.kernel.org/r/20230202141921.4424-1-tklauser@distanz.ch
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
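
For context, the do_idr_lock = false path served a single historical
caller: before commit 1e0bd5a091e5 switched the refcount to atomic64_t,
bpf_map_inc_not_zero() ran under map_idr_lock and could fail once the
refcount reached BPF_MAX_REFCNT, and undoing that increment meant
dropping a reference without re-taking the already-held lock. A hedged
reconstruction of that call site (the names match the old code, but the
body below is an illustrative sketch, not the exact historical source):

    /* Illustrative reconstruction of the pre-1e0bd5a091e5 call site: the
     * caller holds map_idr_lock, so a failed increment has to drop its
     * reference with do_idr_lock == false to avoid self-deadlocking on
     * map_idr_lock inside bpf_map_free_id().
     */
    static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
    {
    	int refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);

    	if (refold >= BPF_MAX_REFCNT) {
    		__bpf_map_put(map, false);	/* map_idr_lock already held */
    		return ERR_PTR(-EBUSY);
    	}
    	if (!refold)
    		return ERR_PTR(-ENOENT);
    	if (uref)
    		atomic_inc(&map->usercnt);
    	return map;
    }

With bpf_map_inc() unable to fail, that caller disappeared and every
remaining caller takes the lock itself, which is what this patch
encodes in the signature.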
include/linux/bpf.h
kernel/bpf/offload.c
kernel/bpf/syscall.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e11db75..35c18a9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1846,7 +1846,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 
 void bpf_prog_free_id(struct bpf_prog *prog);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
                                  u32 offset, enum btf_field_type type);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 88aae38..0c85e06 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -136,7 +136,7 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
 {
        WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
        /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
-       bpf_map_free_id(&offmap->map, true);
+       bpf_map_free_id(&offmap->map);
        list_del_init(&offmap->offloads);
        offmap->netdev = NULL;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 99417b3..bcc9761 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,7 +390,7 @@ static int bpf_map_alloc_id(struct bpf_map *map)
        return id > 0 ? 0 : id;
 }
 
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map)
 {
        unsigned long flags;
 
@@ -402,18 +402,12 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
        if (!map->id)
                return;
 
-       if (do_idr_lock)
-               spin_lock_irqsave(&map_idr_lock, flags);
-       else
-               __acquire(&map_idr_lock);
+       spin_lock_irqsave(&map_idr_lock, flags);
 
        idr_remove(&map_idr, map->id);
        map->id = 0;
 
-       if (do_idr_lock)
-               spin_unlock_irqrestore(&map_idr_lock, flags);
-       else
-               __release(&map_idr_lock);
+       spin_unlock_irqrestore(&map_idr_lock, flags);
 }
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -706,13 +700,13 @@ static void bpf_map_put_uref(struct bpf_map *map)
 }
 
 /* decrement map refcnt and schedule it for freeing via workqueue
- * (unrelying map implementation ops->map_free() might sleep)
+ * (underlying map implementation ops->map_free() might sleep)
  */
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_put(struct bpf_map *map)
 {
        if (atomic64_dec_and_test(&map->refcnt)) {
                /* bpf_map_free_id() must be called first */
-               bpf_map_free_id(map, do_idr_lock);
+               bpf_map_free_id(map);
                btf_put(map->btf);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                /* Avoid spawning kworkers, since they all might contend
@@ -721,11 +715,6 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
                queue_work(system_unbound_wq, &map->work);
        }
 }
-
-void bpf_map_put(struct bpf_map *map)
-{
-       __bpf_map_put(map, true);
-}
 EXPORT_SYMBOL_GPL(bpf_map_put);
 
 void bpf_map_put_with_uref(struct bpf_map *map)
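
A note on the __acquire()/__release() pair removed from
bpf_map_free_id() above: these are sparse annotations, no-ops at
runtime, that kept sparse's lock-context tracking balanced on the
branch that never actually took map_idr_lock. Approximately, from the
kernel's checker definitions (see include/linux/compiler_types.h):

    #ifdef __CHECKER__			/* building under sparse */
    # define __acquire(x)	__context__(x, 1)	/* context count +1 */
    # define __release(x)	__context__(x, -1)	/* context count -1 */
    #else
    # define __acquire(x)	(void)0			/* no-op at runtime */
    # define __release(x)	(void)0
    #endif

With the do_idr_lock = false branch gone, there is nothing left to
balance, so the annotations can go as well.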
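Finally, the deferred-free pattern described by the comment above
bpf_map_put(): the final reference may be dropped from a context that
cannot sleep, so the sleepable ops->map_free() work is punted to a
workqueue. A minimal self-contained sketch of the same pattern, with
hypothetical obj/obj_put/obj_free_deferred names (the atomic and
workqueue calls are the real kernel APIs):

    #include <linux/atomic.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct obj {
    	atomic64_t refcnt;
    	struct work_struct work;
    };

    static void obj_free_deferred(struct work_struct *work)
    {
    	struct obj *o = container_of(work, struct obj, work);

    	kfree(o);	/* runs in kworker process context; may sleep */
    }

    static void obj_put(struct obj *o)
    {
    	if (atomic64_dec_and_test(&o->refcnt)) {
    		INIT_WORK(&o->work, obj_free_deferred);
    		/* system_unbound_wq avoids per-CPU kworker contention,
    		 * matching what bpf_map_put() does above.
    		 */
    		queue_work(system_unbound_wq, &o->work);
    	}
    }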