kernel/bpf/bpf_local_storage.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

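/* Map a selem to one of the map's buckets by hashing the selem pointer;
 * the bucket array size is a power of two (1 << smap->bucket_log).
 */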
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

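/* mem_charge()/mem_uncharge() account the storage memory against the owner
 * through the optional map-type specific hooks (e.g. sk omem for sockets).
 * A map type without a charge hook is simply not accounted.
 */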
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

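/* Return the address of the owner's storage pointer
 * (e.g. &sk->sk_bpf_storage) via the map-type specific hook.
 */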
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

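/* Allocate a selem, either from the map's bpf_mem_alloc cache or with
 * bpf_map_kzalloc(). The value area is zeroed and, if @value is given,
 * initialized from it. When @charge_mem is true, the owner is charged
 * for smap->elem_size (and uncharged again on allocation failure).
 */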
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * from before bpf_mem_cache_alloc was used.
			 *
			 * No need to use zero_map_value. bpf_selem_free()
			 * only does bpf_mem_cache_free when no other
			 * bpf prog is using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	bpf_mem_cache_raw_free(local_storage);
}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(local_storage, rcu);
	else
		call_rcu_tasks_trace(&local_storage->rcu,
				     __bpf_local_storage_free_trace_rcu);
}

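/* Free a local_storage, picking the kfree/RCU path (bpf_ma == false) or the
 * bpf_mem_alloc path (bpf_ma == true). When @reuse_now is false, an RCU
 * tasks trace grace period is required before the memory can be reused.
 */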
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{
	if (!local_storage)
		return;

	if (!bpf_ma) {
		__bpf_local_storage_free(local_storage, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
		return;
	}

	if (smap) {
		migrate_disable();
		bpf_mem_cache_free(&smap->storage_ma, local_storage);
		migrate_enable();
	} else {
		/* smap could be NULL if the selem that triggered
		 * this 'local_storage' creation had been long gone.
		 * In this case, directly do call_rcu().
		 */
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	bpf_mem_cache_raw_free(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}

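/* Free a selem. The special fields embedded in the map value are freed
 * first; the selem itself then goes through the RCU (and tasks trace RCU)
 * callbacks, or back to the bpf_mem_alloc cache when @reuse_now is true.
 */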
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);

	if (!smap->bpf_ma) {
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	} else {
		/* Unlike the vanilla call_rcu() path, bpf_mem_cache_free()
		 * allows the selem to be reused immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
	}
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *selem_smap;

	/* local_storage->smap may be NULL. If it is, get the bpf_ma
	 * from any selem in the local_storage->list. The bpf_ma of all
	 * local_storage and selem should have the same value
	 * for the same map type.
	 *
	 * If the local_storage->list is already empty, the caller does not
	 * care about the bpf_ma value either, because the caller is not
	 * responsible for freeing the local_storage.
	 */

	if (storage_smap)
		return storage_smap->bpf_ma;

	if (!selem) {
		struct hlist_node *n;

		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
					  bpf_rcu_lock_held());
		if (!n)
			return false;

		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
	}
	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());

	return selem_smap->bpf_ma;
}

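/* Unlink selem from its local_storage under local_storage->lock and free
 * the local_storage itself if selem was its last element.
 */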
static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage *local_storage;
	bool bpf_ma, free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from sk */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	storage_smap = rcu_dereference_check(local_storage->smap,
					     bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
}

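/* Link selem into local_storage->list. The caller either holds
 * local_storage->lock or is the only one who can see the storage
 * (as in bpf_local_storage_alloc() before the storage is published).
 */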
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

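/* Set SDATA(selem)->smap and publish selem into the map bucket's list
 * under the bucket lock.
 */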
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

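/* Check BPF_NOEXIST/BPF_EXIST in map_flags against whether an old
 * element already exists.
 */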
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

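/* Allocate a bpf_local_storage for @owner, link @first_selem into it and
 * publish it to the owner with cmpxchg. Returns -EAGAIN if another storage
 * was concurrently published for the same owner.
 */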
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	if (smap->bpf_ma) {
		migrate_disable();
		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
		migrate_enable();
	} else {
		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
					  gfp_flags | __GFP_NOWARN);
	}

	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of the
	 * running context (bh, irq, etc.).
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	/* A lookup has just been done before and concluded a new selem is
	 * needed. An unnecessary alloc is therefore unlikely.
	 */
	alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
	if (!alloc_selem)
		return ERR_PTR(-ENOMEM);

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	alloc_selem = NULL;
	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						true, false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (alloc_selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(alloc_selem, smap, true);
	}
	return err ? ERR_PTR(err) : SDATA(selem);
}

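/* Pick the least-used slot of the owner-side lookup cache so that
 * different maps mostly end up with distinct cache_idx values.
 */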
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

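/* A local storage map must use BPF_F_NO_PREALLOC, an int key, a bounded
 * value size and BTF ids for both key and value; max_entries must be 0.
 */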
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

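/* Called when the owner object (e.g. sk, task, inode, cgroup) is being
 * destroyed: unlink and free every element of its local storage.
 */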
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage_elem *selem;
	bool bpf_ma, free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If the local_storage list has only one element, the
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. This loop removes
		 * all elements, so the last iteration of the loop will
		 * set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
}

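/* Report the map's memory footprint: the map struct plus its bucket array. */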
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}

/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock-free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * the bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf. The task and cgroup storage fall into this case.
 * The bpf_mem_alloc reuses memory immediately. To be reuse-immediate safe,
 * the owner destruction code path needs to go through an rcu grace period
 * before calling bpf_local_storage_destroy().
 *
 * When bpf_ma == false, kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	smap->bpf_ma = bpf_ma;
	if (bpf_ma) {
		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
		if (err)
			goto free_smap;

		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
		if (err) {
			bpf_mem_alloc_destroy(&smap->selem_ma);
			goto free_smap;
		}
	}

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;

free_smap:
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

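/* Tear down a local storage map: release its cache slot, wait for in-flight
 * RCU readers, unlink every remaining selem from each bucket, then free the
 * bucket array and the map itself.
 */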
void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	if (smap->bpf_ma) {
		bpf_mem_alloc_destroy(&smap->selem_ma);
		bpf_mem_alloc_destroy(&smap->storage_ma);
	}
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}