// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

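/* Pick the bucket for a storage element.  The element's address is
 * hashed, so the elements of a map spread across the map's
 * 2^bucket_log buckets (and their locks).
 */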
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
              struct bpf_local_storage_elem *selem)
{
        return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
        struct bpf_map *map = &smap->map;

        if (!map->ops->map_local_storage_charge)
                return 0;

        return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
                         u32 size)
{
        struct bpf_map *map = &smap->map;

        if (map->ops->map_local_storage_uncharge)
                map->ops->map_local_storage_uncharge(smap, owner, size);
}

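/* Return the address of the owner's storage pointer (e.g. the address
 * of sk->sk_bpf_storage for sk storage), resolved through the
 * map-type-specific map_owner_storage_ptr op.
 */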
static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
        struct bpf_map *map = &smap->map;

        return map->ops->map_owner_storage_ptr(owner);
}

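/* A selem is "linked" while it sits on its local_storage->list (snode)
 * and on its map bucket's list (map_node).  An unhashed node means the
 * corresponding unlink has already been done.
 */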
static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->map_node);
}

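/* Allocate a storage element and optionally charge its size to the
 * owner.  GFP_ATOMIC is used because callers may be in contexts that
 * cannot sleep.
 */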
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
                void *value, bool charge_mem)
{
        struct bpf_local_storage_elem *selem;

        if (charge_mem && mem_charge(smap, owner, smap->elem_size))
                return NULL;

        selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
        if (selem) {
                if (value)
                        memcpy(SDATA(selem)->data, value, smap->map.value_size);
                return selem;
        }

        if (charge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        return NULL;
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                     struct bpf_local_storage_elem *selem,
                                     bool uncharge_mem)
{
        struct bpf_local_storage_map *smap;
        bool free_local_storage;
        void *owner;

        smap = rcu_dereference(SDATA(selem)->smap);
        owner = local_storage->owner;

        /* All uncharging on the owner must be done first.
         * The owner may be freed once the last selem is unlinked
         * from local_storage.
         */
        if (uncharge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        free_local_storage = hlist_is_singular_node(&selem->snode,
                                                    &local_storage->list);
        if (free_local_storage) {
                mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
                local_storage->owner = NULL;

                /* After this RCU_INIT, owner may be freed and cannot be used */
                RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

                /* local_storage is not freed now.  local_storage->lock is
                 * still held and raw_spin_unlock_bh(&local_storage->lock)
                 * will be done by the caller.
                 *
                 * Although the unlock will be done under
                 * rcu_read_lock(), it is more intuitive to
                 * read if kfree_rcu(local_storage, rcu) is done
                 * after the raw_spin_unlock_bh(&local_storage->lock).
                 *
                 * Hence, a "bool free_local_storage" is returned
                 * to the caller, which then calls kfree_rcu()
                 * after the unlock.
                 */
        }
        hlist_del_init_rcu(&selem->snode);
        if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
            SDATA(selem))
                RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

        kfree_rcu(selem, rcu);

        return free_local_storage;
}

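/* Unlink selem from its local_storage.  The linked state is checked
 * once locklessly and again under local_storage->lock, so a selem
 * racing with a parallel delete is unlinked exactly once.
 */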
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage *local_storage;
        bool free_local_storage = false;

        if (unlikely(!selem_linked_to_storage(selem)))
                /* selem has already been unlinked from its local_storage */
                return;

        local_storage = rcu_dereference(selem->local_storage);
        raw_spin_lock_bh(&local_storage->lock);
        if (likely(selem_linked_to_storage(selem)))
                free_local_storage = bpf_selem_unlink_storage_nolock(
                        local_storage, selem, true);
        raw_spin_unlock_bh(&local_storage->lock);

        if (free_local_storage)
                kfree_rcu(local_storage, rcu);
}

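/* Add selem to the head of local_storage->list.  The caller must hold
 * local_storage->lock (hence the _nolock suffix).
 */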
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
                                   struct bpf_local_storage_elem *selem)
{
        RCU_INIT_POINTER(selem->local_storage, local_storage);
        hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

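/* Remove selem from its map bucket's list under the bucket lock,
 * rechecking the linked state after the lock is taken.
 */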
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map *smap;
        struct bpf_local_storage_map_bucket *b;

        if (unlikely(!selem_linked_to_map(selem)))
                /* selem has already been unlinked from smap */
                return;

        smap = rcu_dereference(SDATA(selem)->smap);
        b = select_bucket(smap, selem);
        raw_spin_lock_bh(&b->lock);
        if (likely(selem_linked_to_map(selem)))
                hlist_del_init_rcu(&selem->map_node);
        raw_spin_unlock_bh(&b->lock);
}

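/* Record the owning map in SDATA(selem)->smap and add selem to its map
 * bucket's list under the bucket lock.
 */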
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
                        struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);

        raw_spin_lock_bh(&b->lock);
        RCU_INIT_POINTER(SDATA(selem)->smap, smap);
        hlist_add_head_rcu(&selem->map_node, &b->list);
        raw_spin_unlock_bh(&b->lock);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
        /* Always unlink from the map before unlinking from
         * local_storage because selem will be freed after it is
         * successfully unlinked from the local_storage.
         */
        bpf_selem_unlink_map(selem);
        __bpf_selem_unlink_storage(selem);
}

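/* Look up the sdata of smap in the owner's local_storage.  The common
 * case is served from the per-storage cache slot assigned to smap
 * (smap->cache_idx); on a miss, the storage list is walked and, if
 * cacheit_lockit is set, the result is published to the cache slot.
 */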
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
                         struct bpf_local_storage_map *smap,
                         bool cacheit_lockit)
{
        struct bpf_local_storage_data *sdata;
        struct bpf_local_storage_elem *selem;

        /* Fast path (cache hit) */
        sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
        if (sdata && rcu_access_pointer(sdata->smap) == smap)
                return sdata;

        /* Slow path (cache miss) */
        hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
                if (rcu_access_pointer(SDATA(selem)->smap) == smap)
                        break;

        if (!selem)
                return NULL;

        sdata = SDATA(selem);
        if (cacheit_lockit) {
                /* The spinlock is needed to avoid racing with a
                 * parallel delete.  Otherwise, publishing an already
                 * deleted sdata to the cache would become a use-after-free
                 * problem in the next bpf_local_storage_lookup().
                 */
                raw_spin_lock_bh(&local_storage->lock);
                if (selem_linked_to_storage(selem))
                        rcu_assign_pointer(local_storage->cache[smap->cache_idx],
                                           sdata);
                raw_spin_unlock_bh(&local_storage->lock);
        }

        return sdata;
}

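/* Validate the update flags against the current state.  For example
 * (illustrative): with an existing element, BPF_NOEXIST fails with
 * -EEXIST; without one, BPF_EXIST fails with -ENOENT.  BPF_F_LOCK is
 * masked out because it may be combined with either flag.
 */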
static int check_flags(const struct bpf_local_storage_data *old_sdata,
                       u64 map_flags)
{
        if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}

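/* Allocate the owner's bpf_local_storage, with first_selem already
 * linked to it, and publish it to the owner with cmpxchg.  Returns
 * -EAGAIN if another storage has been published concurrently.
 */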
int bpf_local_storage_alloc(void *owner,
                            struct bpf_local_storage_map *smap,
                            struct bpf_local_storage_elem *first_selem)
{
        struct bpf_local_storage *prev_storage, *storage;
        struct bpf_local_storage **owner_storage_ptr;
        int err;

        err = mem_charge(smap, owner, sizeof(*storage));
        if (err)
                return err;

        storage = kzalloc(sizeof(*storage), GFP_ATOMIC | __GFP_NOWARN);
        if (!storage) {
                err = -ENOMEM;
                goto uncharge;
        }

        INIT_HLIST_HEAD(&storage->list);
        raw_spin_lock_init(&storage->lock);
        storage->owner = owner;

        bpf_selem_link_storage_nolock(storage, first_selem);
        bpf_selem_link_map(smap, first_selem);

        owner_storage_ptr =
                (struct bpf_local_storage **)owner_storage(smap, owner);
        /* Publish storage to the owner.
         * Instead of using any lock of the kernel object (i.e. owner),
         * cmpxchg will work with any kernel object regardless of what
         * the running context is, bh, irq...etc.
         *
         * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
         * is protected by the storage->lock.  Hence, when freeing
         * the owner->storage, the storage->lock must be held before
         * setting owner->storage ptr to NULL.
         */
        prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
        if (unlikely(prev_storage)) {
                bpf_selem_unlink_map(first_selem);
                err = -EAGAIN;
                goto uncharge;

                /* Note that even though first_selem was linked to smap's
                 * bucket->list, first_selem can be freed immediately
                 * (instead of kfree_rcu) because
                 * bpf_local_storage_map_free() does a
                 * synchronize_rcu() before walking the bucket->list.
                 * Hence, no one is accessing selem from the
                 * bucket->list under rcu_read_lock().
                 */
        }

        return 0;

uncharge:
        kfree(storage);
        mem_uncharge(smap, owner, sizeof(*storage));
        return err;
}

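/* Illustrative caller sketch (hypothetical, not from this file): a
 * map-type front end such as bpf_sk_storage would drive an update
 * roughly as
 *
 *	sdata = bpf_local_storage_update(sk, smap, value, BPF_ANY);
 *	if (IS_ERR(sdata))
 *		return PTR_ERR(sdata);
 *
 * The exact wrappers live in the per-owner code.
 */
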
/* The owner (e.g. sk) cannot be going away because it is linking a new
 * elem to its storage (e.g. sk->sk_bpf_storage), i.e. for sk,
 * sk->sk_refcnt cannot be 0.  Otherwise, it will become a leak (and
 * cause other memory issues during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
                         void *value, u64 map_flags)
{
        struct bpf_local_storage_data *old_sdata = NULL;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage *local_storage;
        int err;

        /* BPF_EXIST and BPF_NOEXIST cannot both be set */
        if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
            /* BPF_F_LOCK can only be used in a value with spin_lock */
            unlikely((map_flags & BPF_F_LOCK) &&
                     !map_value_has_spin_lock(&smap->map)))
                return ERR_PTR(-EINVAL);

        local_storage = rcu_dereference(*owner_storage(smap, owner));
        if (!local_storage || hlist_empty(&local_storage->list)) {
                /* Very first elem for the owner */
                err = check_flags(NULL, map_flags);
                if (err)
                        return ERR_PTR(err);

                selem = bpf_selem_alloc(smap, owner, value, true);
                if (!selem)
                        return ERR_PTR(-ENOMEM);

                err = bpf_local_storage_alloc(owner, smap, selem);
                if (err) {
                        kfree(selem);
                        mem_uncharge(smap, owner, smap->elem_size);
                        return ERR_PTR(err);
                }

                return SDATA(selem);
        }

        if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
                /* Hope to find an old_sdata to do an inline update
                 * so that taking the local_storage->lock and changing
                 * the lists can be avoided.
                 */
                old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
                err = check_flags(old_sdata, map_flags);
                if (err)
                        return ERR_PTR(err);
                if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
                        copy_map_value_locked(&smap->map, old_sdata->data,
                                              value, false);
                        return old_sdata;
                }
        }

        raw_spin_lock_bh(&local_storage->lock);

        /* Recheck local_storage->list under local_storage->lock */
        if (unlikely(hlist_empty(&local_storage->list))) {
                /* A parallel del is happening and local_storage is going
                 * away.  It has just been checked before, so it is very
                 * unlikely.  Return instead of retrying to keep things
                 * simple.
                 */
                err = -EAGAIN;
                goto unlock_err;
        }

        old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
        err = check_flags(old_sdata, map_flags);
        if (err)
                goto unlock_err;

        if (old_sdata && (map_flags & BPF_F_LOCK)) {
                copy_map_value_locked(&smap->map, old_sdata->data, value,
                                      false);
                selem = SELEM(old_sdata);
                goto unlock;
        }

        /* local_storage->lock is held.  Hence, we are sure
         * we can unlink and uncharge the old_sdata successfully
         * later.  So, instead of charging the new selem now
         * and then uncharging the old selem later (which may cause
         * a potential but unnecessary charge failure), avoid taking
         * a charge at all here (the "!old_sdata" check) and the
         * old_sdata will not be uncharged later during
         * bpf_selem_unlink_storage_nolock().
         */
        selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
        if (!selem) {
                err = -ENOMEM;
                goto unlock_err;
        }

        /* First, link the new selem to the map */
        bpf_selem_link_map(smap, selem);

        /* Second, link (and publish) the new selem to local_storage */
        bpf_selem_link_storage_nolock(local_storage, selem);

        /* Third, remove the old selem, SELEM(old_sdata) */
        if (old_sdata) {
                bpf_selem_unlink_map(SELEM(old_sdata));
                bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
                                                false);
        }

unlock:
        raw_spin_unlock_bh(&local_storage->lock);
        return SDATA(selem);

unlock_err:
        raw_spin_unlock_bh(&local_storage->lock);
        return ERR_PTR(err);
}

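/* Pick a cache slot for a new map: scan the usage counters and take
 * the least-used index, stopping early at a count of zero.  For
 * example (illustrative), with counts {2, 0, 1} the scan stops at
 * index 1 and returns it.
 */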
u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
        u64 min_usage = U64_MAX;
        u16 i, res = 0;

        spin_lock(&cache->idx_lock);

        for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
                if (cache->idx_usage_counts[i] < min_usage) {
                        min_usage = cache->idx_usage_counts[i];
                        res = i;

                        /* Found a free cache_idx */
                        if (!min_usage)
                                break;
                }
        }
        cache->idx_usage_counts[res]++;

        spin_unlock(&cache->idx_lock);

        return res;
}

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
                                      u16 idx)
{
        spin_lock(&cache->idx_lock);
        cache->idx_usage_counts[idx]--;
        spin_unlock(&cache->idx_lock);
}

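/* Free the map and every remaining selem linked to it.  Two rcu grace
 * periods are waited for: one before walking the buckets and one
 * before freeing the map itself (see the comments below).
 */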
void bpf_local_storage_map_free(struct bpf_local_storage_map *smap)
{
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map_bucket *b;
        unsigned int i;

        /* Note that this map might be concurrently cloned from
         * bpf_sk_storage_clone.  Wait for any existing bpf_sk_storage_clone
         * RCU read section to finish before proceeding.  New RCU
         * read sections should be prevented via bpf_map_inc_not_zero.
         */
        synchronize_rcu();

        /* A bpf prog and userspace can no longer access this map
         * now.  No new selem (of this map) can be added
         * to the owner->storage or to the map bucket's list.
         *
         * The elems of this map can be cleaned up here
         * or when the storage is freed, e.g.
         * by bpf_sk_storage_free() during __sk_destruct().
         */
        for (i = 0; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];

                rcu_read_lock();
                /* No one is adding to b->list now */
                while ((selem = hlist_entry_safe(
                                rcu_dereference_raw(hlist_first_rcu(&b->list)),
                                struct bpf_local_storage_elem, map_node))) {
                        bpf_selem_unlink(selem);
                        cond_resched_rcu();
                }
                rcu_read_unlock();
        }

        /* While freeing the storage we may still need to access the map,
         * e.g. when bpf_sk_storage_free() has unlinked a selem from the map,
         * which then made the above while ((selem = ...)) loop
         * exit immediately.
         *
         * However, while freeing the storage one still needs to access the
         * smap->elem_size to do the uncharging in
         * bpf_selem_unlink_storage_nolock().
         *
         * Hence, wait another rcu grace period for the storage to be freed.
         */
        synchronize_rcu();

        kvfree(smap->buckets);
        kfree(smap);
}

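/* Sanity-check the map attributes at creation time: only
 * BPF_F_NO_PREALLOC (mandatory) and BPF_F_CLONE are accepted as flags,
 * max_entries must be unset, the key must be a 4-byte int, and BTF ids
 * are required so the value can be dumped.
 */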
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
        if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
            attr->max_entries ||
            attr->key_size != sizeof(int) || !attr->value_size ||
            /* Enforce BTF for userspace sk dumping */
            !attr->btf_key_type_id || !attr->btf_value_type_id)
                return -EINVAL;

        if (!bpf_capable())
                return -EPERM;

        if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                return -E2BIG;

        return 0;
}

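/* Allocate the map and its buckets.  The bucket count scales with the
 * number of possible CPUs (rounded up to a power of two, minimum 2),
 * which helps spread bucket-lock contention.
 */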
struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
        struct bpf_local_storage_map *smap;
        unsigned int i;
        u32 nbuckets;
        u64 cost;
        int ret;

        smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
        if (!smap)
                return ERR_PTR(-ENOMEM);
        bpf_map_init_from_attr(&smap->map, attr);

        nbuckets = roundup_pow_of_two(num_possible_cpus());
        /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
        nbuckets = max_t(u32, 2, nbuckets);
        smap->bucket_log = ilog2(nbuckets);
        cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

        ret = bpf_map_charge_init(&smap->map.memory, cost);
        if (ret < 0) {
                kfree(smap);
                return ERR_PTR(ret);
        }

        smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
                                 GFP_USER | __GFP_NOWARN);
        if (!smap->buckets) {
                bpf_map_charge_finish(&smap->map.memory);
                kfree(smap);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nbuckets; i++) {
                INIT_HLIST_HEAD(&smap->buckets[i].list);
                raw_spin_lock_init(&smap->buckets[i].lock);
        }

        smap->elem_size =
                sizeof(struct bpf_local_storage_elem) + attr->value_size;

        return smap;
}

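/* The key must be a 32-bit int with no bitfield offset; the BTF int
 * encoding is read from the data that follows the btf_type.
 */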
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
                                    const struct btf *btf,
                                    const struct btf_type *key_type,
                                    const struct btf_type *value_type)
{
        u32 int_data;

        if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                return -EINVAL;

        int_data = *(u32 *)(key_type + 1);
        if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
                return -EINVAL;

        return 0;
}