// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

#ifdef CONFIG_CGROUP_BPF

DEFINE_PER_CPU(struct bpf_cgroup_storage_info,
               bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#include "../cgroup/cgroup-internal.h"

#define LOCAL_STORAGE_CREATE_FLAG_MASK                                  \
        (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

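/*
 * A cgroup storage map keeps its entries in an rbtree ordered by
 * bpf_cgroup_storage_key and on a list used for iteration; both are
 * protected by 'lock'.
 */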
struct bpf_cgroup_storage_map {
        struct bpf_map map;

        spinlock_t lock;
        struct rb_root root;
        struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
        return container_of(map, struct bpf_cgroup_storage_map, map);
}

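/*
 * A map keyed by struct bpf_cgroup_storage_key isolates storage per
 * (cgroup, attach type) pair; a map keyed by a bare __u64 cgroup inode
 * id shares one entry across attach types.
 */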
static bool attach_type_isolated(const struct bpf_map *map)
{
        return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}

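/*
 * rbtree comparator: returns -1, 0 or 1. Compares the full (cgroup id,
 * attach type) pair for isolated maps, and only the cgroup inode id
 * for shared ones.
 */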
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
                                      const void *_key1, const void *_key2)
{
        if (attach_type_isolated(&map->map)) {
                const struct bpf_cgroup_storage_key *key1 = _key1;
                const struct bpf_cgroup_storage_key *key2 = _key2;

                if (key1->cgroup_inode_id < key2->cgroup_inode_id)
                        return -1;
                else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
                        return 1;
                else if (key1->attach_type < key2->attach_type)
                        return -1;
                else if (key1->attach_type > key2->attach_type)
                        return 1;
        } else {
                const __u64 *cgroup_inode_id1 = _key1;
                const __u64 *cgroup_inode_id2 = _key2;

                if (*cgroup_inode_id1 < *cgroup_inode_id2)
                        return -1;
                else if (*cgroup_inode_id1 > *cgroup_inode_id2)
                        return 1;
        }
        return 0;
}

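/*
 * Find the storage entry matching 'key' in the map's rbtree. Callers
 * that already hold map->lock pass locked == true to avoid taking it
 * recursively.
 */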
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked)
{
        struct rb_root *root = &map->root;
        struct rb_node *node;

        if (!locked)
                spin_lock_bh(&map->lock);

        node = root->rb_node;
        while (node) {
                struct bpf_cgroup_storage *storage;

                storage = container_of(node, struct bpf_cgroup_storage, node);

                switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
                case -1:
                        node = node->rb_left;
                        break;
                case 1:
                        node = node->rb_right;
                        break;
                default:
                        if (!locked)
                                spin_unlock_bh(&map->lock);
                        return storage;
                }
        }

        if (!locked)
                spin_unlock_bh(&map->lock);

        return NULL;
}

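/*
 * Insert 'storage' into the map's rbtree; the caller must hold
 * map->lock. Returns -EEXIST if an entry with the same key exists.
 */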
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
                                 struct bpf_cgroup_storage *storage)
{
        struct rb_root *root = &map->root;
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct bpf_cgroup_storage *this;

                this = container_of(*new, struct bpf_cgroup_storage, node);

                parent = *new;
                switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
                case -1:
                        new = &((*new)->rb_left);
                        break;
                case 1:
                        new = &((*new)->rb_right);
                        break;
                default:
                        return -EEXIST;
                }
        }

        rb_link_node(&storage->node, parent, new);
        rb_insert_color(&storage->node, root);

        return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;

        storage = cgroup_storage_lookup(map, key, false);
        if (!storage)
                return NULL;

        return &READ_ONCE(storage->buf)->data[0];
}

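/*
 * Update a shared storage value. With BPF_F_LOCK the new value is
 * copied in place under the value's embedded spin lock; otherwise a
 * freshly allocated buffer is swapped in with xchg() and the old one
 * is freed after an RCU grace period.
 */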
static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
                                      void *value, u64 flags)
{
        struct bpf_cgroup_storage *storage;
        struct bpf_storage_buffer *new;

        if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
                return -EINVAL;

        if (unlikely((flags & BPF_F_LOCK) &&
                     !map_value_has_spin_lock(map)))
                return -EINVAL;

        storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
                                        key, false);
        if (!storage)
                return -ENOENT;

        if (flags & BPF_F_LOCK) {
                copy_map_value_locked(map, storage->buf->data, value, false);
                return 0;
        }

        new = bpf_map_kmalloc_node(map, sizeof(struct bpf_storage_buffer) +
                                   map->value_size,
                                   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
                                   map->numa_node);
        if (!new)
                return -ENOMEM;

        memcpy(&new->data[0], value, map->value_size);
        check_and_init_map_lock(map, new->data);

        new = xchg(&storage->buf, new);
        kfree_rcu(new, rcu);

        return 0;
}

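/*
 * Copy a per-cpu storage value out to user-provided memory: one
 * round_up(value_size, 8) slot per possible CPU, laid out sequentially.
 */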
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
                                   void *value)
{
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
        int cpu, off = 0;
        u32 size;

        rcu_read_lock();
        storage = cgroup_storage_lookup(map, key, false);
        if (!storage) {
                rcu_read_unlock();
                return -ENOENT;
        }

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off,
                                per_cpu_ptr(storage->percpu_buf, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
                                     void *value, u64 map_flags)
{
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;
        int cpu, off = 0;
        u32 size;

        if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
                return -EINVAL;

        rcu_read_lock();
        storage = cgroup_storage_lookup(map, key, false);
        if (!storage) {
                rcu_read_unlock();
                return -ENOENT;
        }

        /* User space provides round_up(value_size, 8) bytes that are
         * copied into the per-cpu area. BPF programs can only access
         * value_size of it. During lookup the same extra bytes are
         * returned, or zeros that percpu_alloc zero-filled, so no
         * kernel data can leak.
         */
        size = round_up(_map->value_size, 8);
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
                                value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

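/*
 * Iterate map keys in list order: with a NULL 'key', return the first
 * entry's key; otherwise return the key of the entry following 'key'.
 */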
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
                                       void *_next_key)
{
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct bpf_cgroup_storage *storage;

        spin_lock_bh(&map->lock);

        if (list_empty(&map->list))
                goto enoent;

        if (key) {
                storage = cgroup_storage_lookup(map, key, true);
                if (!storage)
                        goto enoent;

                /* list_next_entry() never returns NULL; detect the end
                 * of the list by comparing against the list head.
                 */
                storage = list_next_entry(storage, list_map);
                if (&storage->list_map == &map->list)
                        goto enoent;
        } else {
                storage = list_first_entry(&map->list,
                                           struct bpf_cgroup_storage, list_map);
        }

        spin_unlock_bh(&map->lock);

        if (attach_type_isolated(&map->map)) {
                struct bpf_cgroup_storage_key *next = _next_key;

                *next = storage->key;
        } else {
                __u64 *next = _next_key;

                *next = storage->key.cgroup_inode_id;
        }
        return 0;

enoent:
        spin_unlock_bh(&map->lock);
        return -ENOENT;
}

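/*
 * Allocate a cgroup storage map. The key must be either a struct
 * bpf_cgroup_storage_key or a bare __u64 cgroup id, the value at most
 * PAGE_SIZE, and max_entries must be 0: entries are not created through
 * the map API but when a program using the map is attached to a cgroup.
 */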
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_cgroup_storage_map *map;

        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
            attr->key_size != sizeof(__u64))
                return ERR_PTR(-EINVAL);

        if (attr->value_size == 0)
                return ERR_PTR(-EINVAL);

        if (attr->value_size > PAGE_SIZE)
                return ERR_PTR(-E2BIG);

        if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
            !bpf_map_flags_access_ok(attr->map_flags))
                return ERR_PTR(-EINVAL);

        if (attr->max_entries)
                /* max_entries is not used and enforced to be 0 */
                return ERR_PTR(-EINVAL);

        map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
                           __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
        if (!map)
                return ERR_PTR(-ENOMEM);

        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&map->map, attr);

        spin_lock_init(&map->lock);
        map->root = RB_ROOT;
        INIT_LIST_HEAD(&map->list);

        return &map->map;
}

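/*
 * Tear down a storage map: unlink and free every remaining entry under
 * cgroup_mutex, then release the map itself. By this point no program
 * holds a reference, so the rbtree and list must end up empty.
 */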
static void cgroup_storage_map_free(struct bpf_map *_map)
{
        struct bpf_cgroup_storage_map *map = map_to_storage(_map);
        struct list_head *storages = &map->list;
        struct bpf_cgroup_storage *storage, *stmp;

        mutex_lock(&cgroup_mutex);

        list_for_each_entry_safe(storage, stmp, storages, list_map) {
                bpf_cgroup_storage_unlink(storage);
                bpf_cgroup_storage_free(storage);
        }

        mutex_unlock(&cgroup_mutex);

        WARN_ON(!RB_EMPTY_ROOT(&map->root));
        WARN_ON(!list_empty(&map->list));

        kfree(map);
}

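/*
 * Storage entries are bound to cgroup attachments and cannot be
 * deleted through the map API.
 */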
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}

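/*
 * Validate the BTF supplied for the map's key: it must describe exactly
 * the expected key layout for this map flavor.
 */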
static int cgroup_storage_check_btf(const struct bpf_map *map,
                                    const struct btf *btf,
                                    const struct btf_type *key_type,
                                    const struct btf_type *value_type)
{
        if (attach_type_isolated(map)) {
                struct btf_member *m;
                u32 offset, size;

                /* Key is expected to be of struct bpf_cgroup_storage_key type,
                 * which is:
                 * struct bpf_cgroup_storage_key {
                 *      __u64   cgroup_inode_id;
                 *      __u32   attach_type;
                 * };
                 */

                /*
                 * The key type must be a structure with two fields.
                 */
                if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
                    BTF_INFO_VLEN(key_type->info) != 2)
                        return -EINVAL;

                /*
                 * The first field must be a 64 bit integer at 0 offset.
                 */
                m = (struct btf_member *)(key_type + 1);
                size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
                if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
                        return -EINVAL;

                /*
                 * The second field must be a 32 bit integer at 64 bit offset.
                 */
                m++;
                offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
                size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
                if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
                        return -EINVAL;
        } else {
                u32 int_data;

                /*
                 * Key is expected to be u64, which stores the cgroup_inode_id
                 */

                if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                        return -EINVAL;

                int_data = *(u32 *)(key_type + 1);
                if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
                        return -EINVAL;
        }

        return 0;
}

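/*
 * Pretty-print one element (used when dumping a pinned map through
 * bpffs): shared values print as a single entry, per-cpu values as one
 * entry per possible CPU.
 */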
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
                                         struct seq_file *m)
{
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
        struct bpf_cgroup_storage *storage;
        int cpu;

        rcu_read_lock();
        storage = cgroup_storage_lookup(map_to_storage(map), key, false);
        if (!storage) {
                rcu_read_unlock();
                return;
        }

        btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
        if (stype == BPF_CGROUP_STORAGE_SHARED) {
                seq_puts(m, ": ");
                btf_type_seq_show(map->btf, map->btf_value_type_id,
                                  &READ_ONCE(storage->buf)->data[0], m);
                seq_puts(m, "\n");
        } else {
                seq_puts(m, ": {\n");
                for_each_possible_cpu(cpu) {
                        seq_printf(m, "\tcpu%d: ", cpu);
                        btf_type_seq_show(map->btf, map->btf_value_type_id,
                                          per_cpu_ptr(storage->percpu_buf, cpu),
                                          m);
                        seq_puts(m, "\n");
                }
                seq_puts(m, "}\n");
        }
        rcu_read_unlock();
}

static int cgroup_storage_map_btf_id;
const struct bpf_map_ops cgroup_storage_map_ops = {
        .map_alloc = cgroup_storage_map_alloc,
        .map_free = cgroup_storage_map_free,
        .map_get_next_key = cgroup_storage_get_next_key,
        .map_lookup_elem = cgroup_storage_lookup_elem,
        .map_update_elem = cgroup_storage_update_elem,
        .map_delete_elem = cgroup_storage_delete_elem,
        .map_check_btf = cgroup_storage_check_btf,
        .map_seq_show_elem = cgroup_storage_seq_show_elem,
        .map_btf_name = "bpf_cgroup_storage_map",
        .map_btf_id = &cgroup_storage_map_btf_id,
};

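/*
 * Record that the program described by 'aux' uses '_map' as its cgroup
 * storage of the given type. A program may use at most one cgroup
 * storage map per storage type; a second map of the same type fails
 * with -EBUSY.
 */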
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

        if (aux->cgroup_storage[stype] &&
            aux->cgroup_storage[stype] != _map)
                return -EBUSY;

        aux->cgroup_storage[stype] = _map;
        return 0;
}

static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
        size_t size;

        if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
                size = sizeof(struct bpf_storage_buffer) + map->value_size;
                *pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
                                  PAGE_SIZE) >> PAGE_SHIFT;
        } else {
                size = map->value_size;
                *pages = round_up(round_up(size, 8) * num_possible_cpus(),
                                  PAGE_SIZE) >> PAGE_SHIFT;
        }

        return size;
}

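/*
 * Allocate the storage backing a program's cgroup storage map of the
 * given type: a single shared buffer, or a per-cpu area for per-cpu
 * maps. Returns NULL if the program does not use such a map, and
 * ERR_PTR(-ENOMEM) on allocation failure.
 */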
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                        enum bpf_cgroup_storage_type stype)
{
        const gfp_t gfp = __GFP_ZERO | GFP_USER;
        struct bpf_cgroup_storage *storage;
        struct bpf_map *map;
        size_t size;
        u32 pages;

        map = prog->aux->cgroup_storage[stype];
        if (!map)
                return NULL;

        size = bpf_cgroup_storage_calculate_size(map, &pages);

        storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
                                       gfp, map->numa_node);
        if (!storage)
                goto enomem;

        if (stype == BPF_CGROUP_STORAGE_SHARED) {
                storage->buf = bpf_map_kmalloc_node(map, size, gfp,
                                                    map->numa_node);
                if (!storage->buf)
                        goto enomem;
                check_and_init_map_lock(map, storage->buf->data);
        } else {
                storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
                if (!storage->percpu_buf)
                        goto enomem;
        }

        storage->map = (struct bpf_cgroup_storage_map *)map;

        return storage;

enomem:
        kfree(storage);
        return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
        struct bpf_cgroup_storage *storage =
                container_of(rcu, struct bpf_cgroup_storage, rcu);

        kfree(storage->buf);
        kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
        struct bpf_cgroup_storage *storage =
                container_of(rcu, struct bpf_cgroup_storage, rcu);

        free_percpu(storage->percpu_buf);
        kfree(storage);
}

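/*
 * Free a storage entry after an RCU grace period, picking the callback
 * that matches the storage type.
 */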
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
        enum bpf_cgroup_storage_type stype;
        struct bpf_map *map;

        if (!storage)
                return;

        map = &storage->map->map;
        stype = cgroup_storage_type(map);
        if (stype == BPF_CGROUP_STORAGE_SHARED)
                call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
        else
                call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

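/*
 * Bind a storage entry to a (cgroup, attach type) pair and publish it:
 * set the key, insert it into the map's rbtree and add it to both the
 * map's and the cgroup's lists.
 */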
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type)
{
        struct bpf_cgroup_storage_map *map;

        if (!storage)
                return;

        storage->key.attach_type = type;
        storage->key.cgroup_inode_id = cgroup_id(cgroup);

        map = storage->map;

        spin_lock_bh(&map->lock);
        WARN_ON(cgroup_storage_insert(map, storage));
        list_add(&storage->list_map, &map->list);
        list_add(&storage->list_cg, &cgroup->bpf.storages);
        spin_unlock_bh(&map->lock);
}

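/*
 * Remove a storage entry from the map's rbtree and from both lists,
 * the inverse of bpf_cgroup_storage_link().
 */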
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
        struct bpf_cgroup_storage_map *map;
        struct rb_root *root;

        if (!storage)
                return;

        map = storage->map;

        spin_lock_bh(&map->lock);
        root = &map->root;
        rb_erase(&storage->node, root);

        list_del(&storage->list_map);
        list_del(&storage->list_cg);
        spin_unlock_bh(&map->lock);
}

#endif  /* CONFIG_CGROUP_BPF */