// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

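/* "FD maps" hold references to other kernel objects (perf events,
 * cgroups, programs, other maps); on the syscall side their values are
 * passed in and out as file descriptors rather than plain data.
 */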
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
                        IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

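/* 0 = unprivileged bpf() calls allowed, 1 = disabled for good (cannot
 * be re-enabled at runtime), 2 = disabled but an admin may still flip
 * it back to 0 via sysctl. 2 is the default when
 * CONFIG_BPF_UNPRIV_DEFAULT_OFF is built in.
 */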
int sysctl_unprivileged_bpf_disabled __read_mostly =
        IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
        [_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
                             size_t expected_size,
                             size_t actual_size)
{
        int res;

        if (unlikely(actual_size > PAGE_SIZE))  /* silly large */
                return -E2BIG;

        if (actual_size <= expected_size)
                return 0;

        if (uaddr.is_kernel)
                res = memchr_inv(uaddr.kernel + expected_size, 0,
                                 actual_size - expected_size) == NULL;
        else
                res = check_zeroed_user(uaddr.user + expected_size,
                                        actual_size - expected_size);
        if (res < 0)
                return res;
        return res ? 0 : -E2BIG;
}

const struct bpf_map_ops bpf_map_offload_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = bpf_map_offload_map_alloc,
        .map_free = bpf_map_offload_map_free,
        .map_check_btf = map_check_no_btf,
};

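/* Resolve attr->map_type to its ops table and allocate the map.
 * array_index_nospec() clamps the user-controlled type under
 * speculation so it cannot serve as a Spectre v1 gadget. Maps bound to
 * a device (map_ifindex set) are redirected to the offload ops.
 */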
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops;
        u32 type = attr->map_type;
        struct bpf_map *map;
        int err;

        if (type >= ARRAY_SIZE(bpf_map_types))
                return ERR_PTR(-EINVAL);
        type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
        ops = bpf_map_types[type];
        if (!ops)
                return ERR_PTR(-EINVAL);

        if (ops->map_alloc_check) {
                err = ops->map_alloc_check(attr);
                if (err)
                        return ERR_PTR(err);
        }
        if (attr->map_ifindex)
                ops = &bpf_map_offload_ops;
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return map;
        map->ops = ops;
        map->map_type = type;
        return map;
}

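/* Size of the value as seen from the syscall interface: per-CPU maps
 * expose one 8-byte-aligned slot per possible CPU, FD maps expose a
 * u32 file descriptor, everything else exposes value_size itself.
 */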
static u32 bpf_map_value_size(const struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
            map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return round_up(map->value_size, 8) * num_possible_cpus();
        else if (IS_FD_MAP(map))
                return sizeof(u32);
        else
                return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
        /* Wait for any running BPF programs to complete so that
         * userspace, when we return to it, knows that all programs
         * that could be running use the new map value.
         */
        if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
            map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
                synchronize_rcu();
}

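/* Syscall-side element update: dispatch on map type. For the generic
 * paths, bpf_disable_instrumentation() keeps tracing BPF programs
 * (kprobes etc.) from running on this CPU while the map is being
 * modified, so they cannot recurse into the map internals.
 */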
static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
                                void *value, __u64 flags)
{
        int err;

        /* Need to create a kthread, thus must support schedule */
        if (bpf_map_is_dev_bound(map)) {
                return bpf_map_offload_update_elem(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                return map->ops->map_update_elem(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
                   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                return sock_map_update_elem_sys(map, key, value, flags);
        } else if (IS_FD_PROG_ARRAY(map)) {
                return bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                    flags);
        }

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_update(map, key, value,
                                                       flags);
        } else if (IS_FD_ARRAY(map)) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
                rcu_read_lock();
                err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
                                                  flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                /* rcu_read_lock() is not needed */
                err = bpf_fd_reuseport_array_update_elem(map, key, value,
                                                         flags);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_push_elem(map, value, flags);
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, flags);
                rcu_read_unlock();
        }
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

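/* Syscall-side lookup counterpart of bpf_map_update_value(): copy the
 * element for "key" into the kernel buffer at "value", dispatching on
 * map type just like the update path does.
 */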
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
                              __u64 flags)
{
        void *ptr;
        int err;

        if (bpf_map_is_dev_bound(map))
                return bpf_map_offload_lookup_elem(map, key, value);

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
                err = bpf_fd_array_map_lookup_elem(map, key, value);
        } else if (IS_FD_HASH(map)) {
                err = bpf_fd_htab_map_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_peek_elem(map, value);
        } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* struct_ops map requires directly updating "value" */
                err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
        } else {
                rcu_read_lock();
                if (map->ops->map_lookup_elem_sys_only)
                        ptr = map->ops->map_lookup_elem_sys_only(map, key);
                else
                        ptr = map->ops->map_lookup_elem(map, key);
                if (IS_ERR(ptr)) {
                        err = PTR_ERR(ptr);
                } else if (!ptr) {
                        err = -ENOENT;
                } else {
                        err = 0;
                        if (flags & BPF_F_LOCK)
                                /* lock 'ptr' and copy everything but lock */
                                copy_map_value_locked(map, value, ptr, true);
                        else
                                copy_map_value(map, value, ptr);
                        /* mask lock and timer, since value wasn't zero inited */
                        check_and_init_map_value(map, value);
                }
                rcu_read_unlock();
        }

        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the
 * active memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
        /* We really just want to fail instead of triggering OOM killer
         * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
         * which is used for lower order allocation requests.
         *
         * It has been observed that higher order allocation requests done by
         * vmalloc with __GFP_NORETRY being set might fail due to not trying
         * to reclaim memory from the page cache, thus we set
         * __GFP_RETRY_MAYFAIL to avoid such situations.
         */

        const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
        unsigned int flags = 0;
        unsigned long align = 1;
        void *area;

        if (size >= SIZE_MAX)
                return NULL;

        /* kmalloc()'ed memory can't be mmap()'ed */
        if (mmapable) {
                BUG_ON(!PAGE_ALIGNED(size));
                align = SHMLBA;
                flags = VM_USERMAP;
        } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
                                    numa_node);
                if (area != NULL)
                        return area;
        }

        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                        gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
                        flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
        kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
        /* Some map creation flags are not tied to the map object but
         * rather to the map fd instead, so they have no meaning upon
         * map object inspection since multiple file descriptors with
         * different (access) properties can exist here. Thus, given
         * this has zero meaning for the map itself, lets clear these
         * from here.
         */
        return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
        map->map_type = attr->map_type;
        map->key_size = attr->key_size;
        map->value_size = attr->value_size;
        map->max_entries = attr->max_entries;
        map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
        map->numa_node = bpf_map_attr_numa_node(attr);
}

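/* Allocate a user-visible ID for the map. IDs are handed out
 * cyclically from [1, INT_MAX) so freed IDs are not reused right away;
 * idr_preload() lets the allocation itself run with GFP_ATOMIC under
 * the BH-safe spinlock.
 */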
static int bpf_map_alloc_id(struct bpf_map *map)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&map_idr_lock);
        id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
        if (id > 0)
                map->id = id;
        spin_unlock_bh(&map_idr_lock);
        idr_preload_end();

        if (WARN_ON_ONCE(!id))
                return -ENOSPC;

        return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
        unsigned long flags;

        /* Offloaded maps are removed from the IDR store when their device
         * disappears - even if someone holds an fd to them they are unusable,
         * the memory is gone, all ops will fail; they are simply waiting for
         * refcnt to drop to be freed.
         */
        if (!map->id)
                return;

        if (do_idr_lock)
                spin_lock_irqsave(&map_idr_lock, flags);
        else
                __acquire(&map_idr_lock);

        idr_remove(&map_idr, map->id);
        map->id = 0;

        if (do_idr_lock)
                spin_unlock_irqrestore(&map_idr_lock, flags);
        else
                __release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
        map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
        mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node)
{
        struct mem_cgroup *old_memcg;
        void *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
        set_active_memcg(old_memcg);

        return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
        struct mem_cgroup *old_memcg;
        void *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = kzalloc(size, flags | __GFP_ACCOUNT);
        set_active_memcg(old_memcg);

        return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags)
{
        struct mem_cgroup *old_memcg;
        void __percpu *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
        set_active_memcg(old_memcg);

        return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);

        security_bpf_map_free(map);
        bpf_map_release_memcg(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
        if (atomic64_dec_and_test(&map->usercnt)) {
                if (map->ops->map_release_uref)
                        map->ops->map_release_uref(map);
        }
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
        if (atomic64_dec_and_test(&map->refcnt)) {
                /* bpf_map_free_id() must be called first */
                bpf_map_free_id(map, do_idr_lock);
                btf_put(map->btf);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

void bpf_map_put(struct bpf_map *map)
{
        __bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
        bpf_map_put_uref(map);
        bpf_map_put(map);
}

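/* ->release() handler for the map's file descriptor: give the map
 * implementation a chance to clean up per-file state, then drop both
 * the user refcnt and the map refcnt.
 */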
static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_release)
                map->ops->map_release(map, filp);

        bpf_map_put_with_uref(map);
        return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
        fmode_t mode = f.file->f_mode;

        /* Our file permissions may have been overridden by global
         * map permissions facing syscall side.
         */
        if (READ_ONCE(map->frozen))
                mode &= ~FMODE_CAN_WRITE;
        return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and display
 * reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
        unsigned long size;

        size = round_up(map->key_size + bpf_map_value_size(map), 8);

        return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 type = 0, jited = 0;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                type  = array->aux->type;
                jited = array->aux->jited;
        }

        seq_printf(m,
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%lu\n"
                   "map_id:\t%u\n"
                   "frozen:\t%u\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   bpf_map_memory_footprint(map),
                   map->id,
                   READ_ONCE(map->frozen));
        if (type) {
                seq_printf(m, "owner_prog_type:\t%u\n", type);
                seq_printf(m, "owner_jited:\t%u\n", jited);
        }
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
                              loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_READ.
         */
        return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
                               size_t siz, loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_WRITE.
         */
        return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt++;
                mutex_unlock(&map->freeze_mutex);
        }
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt--;
                mutex_unlock(&map->freeze_mutex);
        }
}

static const struct vm_operations_struct bpf_map_default_vmops = {
        .open           = bpf_map_mmap_open,
        .close          = bpf_map_mmap_close,
};

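/* mmap() handler for BPF maps. Only shared mappings are allowed, and
 * maps containing bpf_spin_lock or bpf_timer fields cannot be mmap'ed
 * at all. Writable mappings are refused for frozen and
 * BPF_F_RDONLY_PROG maps; writecnt tracks writable mappings so that
 * map_freeze() can reject maps user space could still modify.
 */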
static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct bpf_map *map = filp->private_data;
        int err;

        if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
            map_value_has_timer(map))
                return -ENOTSUPP;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        mutex_lock(&map->freeze_mutex);

        if (vma->vm_flags & VM_WRITE) {
                if (map->frozen) {
                        err = -EPERM;
                        goto out;
                }
                /* map is meant to be read-only, so do not allow mapping as
                 * writable, because it's possible to leak a writable page
                 * reference and allow user-space to still modify it after
                 * freezing, while the verifier will assume contents do not
                 * change
                 */
                if (map->map_flags & BPF_F_RDONLY_PROG) {
                        err = -EACCES;
                        goto out;
                }
        }

        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
        vma->vm_flags &= ~VM_MAYEXEC;
        if (!(vma->vm_flags & VM_WRITE))
                /* disallow re-mapping with PROT_WRITE */
                vma->vm_flags &= ~VM_MAYWRITE;

        err = map->ops->map_mmap(map, vma);
        if (err)
                goto out;

        if (vma->vm_flags & VM_MAYWRITE)
                map->writecnt++;
out:
        mutex_unlock(&map->freeze_mutex);
        return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_poll)
                return map->ops->map_poll(map, filp, pts);

        return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = bpf_map_show_fdinfo,
#endif
        .release        = bpf_map_release,
        .read           = bpf_dummy_read,
        .write          = bpf_dummy_write,
        .mmap           = bpf_map_mmap,
        .poll           = bpf_map_poll,
};

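/* Install a new fd for the map, backed by an anonymous inode. The LSM
 * hook may veto the requested access mode; O_CLOEXEC is forced so the
 * fd does not leak across exec.
 */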
int bpf_map_new_fd(struct bpf_map *map, int flags)
{
        int ret;

        ret = security_bpf_map(map, OPEN_FMODE(flags));
        if (ret < 0)
                return ret;

        return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
                                flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
        if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
                return -EINVAL;
        if (flags & BPF_F_RDONLY)
                return O_RDONLY;
        if (flags & BPF_F_WRONLY)
                return O_WRONLY;
        return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
        const char *end = src + size;
        const char *orig_src = src;

        memset(dst, 0, size);
        /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
                if (!isalnum(*src) &&
                    *src != '_' && *src != '.')
                        return -EINVAL;
                *dst++ = *src++;
        }

        /* No '\0' found in "size" number of bytes */
        if (src == end)
                return -EINVAL;

        return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
                     const struct btf *btf,
                     const struct btf_type *key_type,
                     const struct btf_type *value_type)
{
        return -ENOTSUPP;
}

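/* Validate the BTF types user space supplied for the map's key and
 * value: their sizes must match key_size/value_size, and any
 * bpf_spin_lock or bpf_timer field found in the value type is only
 * accepted for map types that support it and must fit inside the
 * value. Map-specific checks are then delegated to ->map_check_btf().
 */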
static int map_check_btf(struct bpf_map *map, const struct btf *btf,
                         u32 btf_key_id, u32 btf_value_id)
{
        const struct btf_type *key_type, *value_type;
        u32 key_size, value_size;
        int ret = 0;

        /* Some maps allow key to be unspecified. */
        if (btf_key_id) {
                key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
                if (!key_type || key_size != map->key_size)
                        return -EINVAL;
        } else {
                key_type = btf_type_by_id(btf, 0);
                if (!map->ops->map_check_btf)
                        return -EINVAL;
        }

        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
        if (!value_type || value_size != map->value_size)
                return -EINVAL;

        map->spin_lock_off = btf_find_spin_lock(btf, value_type);

        if (map_value_has_spin_lock(map)) {
                if (map->map_flags & BPF_F_RDONLY_PROG)
                        return -EACCES;
                if (map->map_type != BPF_MAP_TYPE_HASH &&
                    map->map_type != BPF_MAP_TYPE_ARRAY &&
                    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
                        return -ENOTSUPP;
                if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
                    map->value_size) {
                        WARN_ONCE(1,
                                  "verifier bug spin_lock_off %d value_size %d\n",
                                  map->spin_lock_off, map->value_size);
                        return -EFAULT;
                }
        }

        map->timer_off = btf_find_timer(btf, value_type);
        if (map_value_has_timer(map)) {
                if (map->map_flags & BPF_F_RDONLY_PROG)
                        return -EACCES;
                if (map->map_type != BPF_MAP_TYPE_HASH &&
                    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
                    map->map_type != BPF_MAP_TYPE_ARRAY)
                        return -EOPNOTSUPP;
        }

        if (map->ops->map_check_btf)
                ret = map->ops->map_check_btf(map, btf, key_type, value_type);

        return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_map *map;
        int f_flags;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        if (attr->btf_vmlinux_value_type_id) {
                if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
                    attr->btf_key_type_id || attr->btf_value_type_id)
                        return -EINVAL;
        } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
                return -EINVAL;
        }

        f_flags = bpf_get_file_flag(attr->map_flags);
        if (f_flags < 0)
                return f_flags;

        if (numa_node != NUMA_NO_NODE &&
            ((unsigned int)numa_node >= nr_node_ids ||
             !node_online(numa_node)))
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = bpf_obj_name_cpy(map->name, attr->map_name,
                               sizeof(attr->map_name));
        if (err < 0)
                goto free_map;

        atomic64_set(&map->refcnt, 1);
        atomic64_set(&map->usercnt, 1);
        mutex_init(&map->freeze_mutex);

        map->spin_lock_off = -EINVAL;
        map->timer_off = -EINVAL;
        if (attr->btf_key_type_id || attr->btf_value_type_id ||
            /* Even when the map's value is a kernel struct,
             * the bpf_prog.o must have BTF to begin with
             * to figure out the corresponding kernel
             * counterpart.  Thus, attr->btf_fd has
             * to be valid also.
             */
            attr->btf_vmlinux_value_type_id) {
                struct btf *btf;

                btf = btf_get_by_fd(attr->btf_fd);
                if (IS_ERR(btf)) {
                        err = PTR_ERR(btf);
                        goto free_map;
                }
                if (btf_is_kernel(btf)) {
                        btf_put(btf);
                        err = -EACCES;
                        goto free_map;
                }
                map->btf = btf;

                if (attr->btf_value_type_id) {
                        err = map_check_btf(map, btf, attr->btf_key_type_id,
                                            attr->btf_value_type_id);
                        if (err)
                                goto free_map;
                }

                map->btf_key_type_id = attr->btf_key_type_id;
                map->btf_value_type_id = attr->btf_value_type_id;
                map->btf_vmlinux_value_type_id =
                        attr->btf_vmlinux_value_type_id;
        }

        err = security_bpf_map_alloc(map);
        if (err)
                goto free_map;

        err = bpf_map_alloc_id(map);
        if (err)
                goto free_map_sec;

        bpf_map_save_memcg(map);

        err = bpf_map_new_fd(map, f_flags);
        if (err < 0) {
                /* failed to allocate fd.
                 * bpf_map_put_with_uref() is needed because the above
                 * bpf_map_alloc_id() has published the map
                 * to the userspace and the userspace may
                 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
                 */
                bpf_map_put_with_uref(map);
                return err;
        }

        return err;

free_map_sec:
        security_bpf_map_free(map);
free_map:
        btf_put(map->btf);
        map->ops->map_free(map);
        return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
        atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc(map);
        fdput(f);

        return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc_with_uref(map);
        fdput(f);

        return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
        int refold;

        refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
        if (!refold)
                return ERR_PTR(-ENOENT);
        if (uref)
                atomic64_inc(&map->usercnt);

        return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
        spin_lock_bh(&map_idr_lock);
        map = __bpf_map_inc_not_zero(map, false);
        spin_unlock_bh(&map_idr_lock);

        return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
        if (key_size)
                return vmemdup_user(ukey, key_size);

        if (ukey)
                return ERR_PTR(-EINVAL);

        return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
        if (key_size)
                return kvmemdup_bpfptr(ukey, key_size);

        if (!bpfptr_is_null(ukey))
                return ERR_PTR(-EINVAL);

        return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

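/* BPF_MAP_LOOKUP_ELEM: copy the key in from user space, look the
 * element up into a kernel-side value buffer, then copy that buffer
 * back out to attr->value.
 */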
static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        if (attr->flags & ~BPF_F_LOCK)
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = bpf_map_value_size(map);

        err = -ENOMEM;
        value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = bpf_map_copy_value(map, key, value, attr->flags);
        if (err)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;

        err = 0;

free_value:
        kvfree(value);
free_key:
        kvfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
        bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
        bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = ___bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = bpf_map_value_size(map);

        err = -ENOMEM;
        value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_bpfptr(value, uvalue, value_size) != 0)
                goto free_value;

        err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
        kvfree(value);
free_key:
        kvfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_delete_elem(map, key);
                goto out;
        } else if (IS_FD_PROG_ARRAY(map) ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* These maps require sleepable context */
                err = map->ops->map_delete_elem(map, key);
                goto out;
        }

        bpf_disable_instrumentation();
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);
out:
        kvfree(key);
err_put:
        fdput(f);
        return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *unext_key = u64_to_user_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *next_key;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if (ukey) {
                key = __bpf_copy_key(ukey, map->key_size);
                if (IS_ERR(key)) {
                        err = PTR_ERR(key);
                        goto err_put;
                }
        } else {
                key = NULL;
        }

        err = -ENOMEM;
        next_key = kvmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_get_next_key(map, key, next_key);
                goto out;
        }

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
out:
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        err = 0;

free_next_key:
        kvfree(next_key);
free_key:
        kvfree(key);
err_put:
        fdput(f);
        return err;
}

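/* Batch delete: read up to batch.count keys from user space and delete
 * them one by one, stopping at the first failure. The number of keys
 * actually processed is reported back in uattr->batch.count.
 */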
int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 cp, max_count;
        int err = 0;
        void *key;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                                   map->key_size))
                        break;

                if (bpf_map_is_dev_bound(map)) {
                        err = bpf_map_offload_delete_elem(map, key);
                        break;
                }

                bpf_disable_instrumentation();
                rcu_read_lock();
                err = map->ops->map_delete_elem(map, key);
                rcu_read_unlock();
                bpf_enable_instrumentation();
                maybe_wait_bpf_programs(map);
                if (err)
                        break;
        }
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        kvfree(key);
        return err;
}

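/* Batch update: like generic_map_delete_batch(), but each iteration
 * copies in a key/value pair and feeds it to bpf_map_update_value().
 * On partial failure, the count written back tells user space how many
 * elements were applied.
 */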
int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 value_size, cp, max_count;
        int ufd = attr->map_fd;
        void *key, *value;
        struct fd f;
        int err = 0;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value) {
                kvfree(key);
                return -ENOMEM;
        }

        /* hold the map's file across the bpf_map_update_value() calls;
         * taken only after all early-return paths, and paired with the
         * fdput() below so the file reference is not leaked
         */
        f = fdget(ufd);
        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                                   map->key_size) ||
                    copy_from_user(value, values + cp * value_size, value_size))
                        break;

                err = bpf_map_update_value(map, f, key, value,
                                           attr->batch.elem_flags);

                if (err)
                        break;
        }

        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        fdput(f);
        kvfree(value);
        kvfree(key);
        return err;
}

#define MAP_LOOKUP_RETRIES 3

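/* Batch lookup: walk the map with map_get_next_key() and copy each
 * key/value pair out to user space. A concurrent delete can make a
 * just-returned key disappear before the lookup; in that case the walk
 * retries from the same position up to MAP_LOOKUP_RETRIES times before
 * giving up with -EINTR. out_batch returns the last key processed so
 * user space can resume from there.
 */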
int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
        void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        void *buf, *buf_prevkey, *prev_key, *key, *value;
        int err, retry = MAP_LOOKUP_RETRIES;
        u32 value_size, cp, max_count;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map))
                return -EINVAL;

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        if (put_user(0, &uattr->batch.count))
                return -EFAULT;

        buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!buf_prevkey)
                return -ENOMEM;

        buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
        if (!buf) {
                kvfree(buf_prevkey);
                return -ENOMEM;
        }

        err = -EFAULT;
        prev_key = NULL;
        if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
                goto free_buf;
        key = buf;
        value = key + map->key_size;
        if (ubatch)
                prev_key = buf_prevkey;

        for (cp = 0; cp < max_count;) {
                rcu_read_lock();
                err = map->ops->map_get_next_key(map, prev_key, key);
                rcu_read_unlock();
                if (err)
                        break;
                err = bpf_map_copy_value(map, key, value,
                                         attr->batch.elem_flags);

                if (err == -ENOENT) {
                        if (retry) {
                                retry--;
                                continue;
                        }
                        err = -EINTR;
                        break;
                }

                if (err)
                        goto free_buf;

                if (copy_to_user(keys + cp * map->key_size, key,
                                 map->key_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }
                if (copy_to_user(values + cp * value_size, value, value_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }

                if (!prev_key)
                        prev_key = buf_prevkey;

                swap(prev_key, key);
                retry = MAP_LOOKUP_RETRIES;
                cp++;
        }

        if (err == -EFAULT)
                goto free_buf;

        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
            (cp && copy_to_user(uobatch, prev_key, map->key_size)))
                err = -EFAULT;

free_buf:
        kvfree(buf_prevkey);
        kvfree(buf);
        return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
                return -EINVAL;

        if (attr->flags & ~BPF_F_LOCK)
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
            !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        if (attr->flags &&
            (map->map_type == BPF_MAP_TYPE_QUEUE ||
             map->map_type == BPF_MAP_TYPE_STACK)) {
                err = -EINVAL;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = bpf_map_value_size(map);

        err = -ENOMEM;
        value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -ENOTSUPP;
        if (map->map_type == BPF_MAP_TYPE_QUEUE ||
            map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_pop_elem(map, value);
        } else if (map->map_type == BPF_MAP_TYPE_HASH ||
                   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
                   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                if (!bpf_map_is_dev_bound(map)) {
                        bpf_disable_instrumentation();
                        rcu_read_lock();
                        err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
                        rcu_read_unlock();
                        bpf_enable_instrumentation();
                }
        }

        if (err)
                goto free_value;

        if (copy_to_user(uvalue, value, value_size) != 0) {
                err = -EFAULT;
                goto free_value;
        }

        err = 0;

free_value:
        kvfree(value);
free_key:
        kvfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

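/* BPF_MAP_FREEZE: make the map read-only from the syscall side while
 * leaving BPF-program-side access intact. Freezing is refused while
 * writable mmap()s exist (writecnt != 0), is a one-way transition, and
 * requires bpf_capable() privileges.
 */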
1586 static int map_freeze(const union bpf_attr *attr)
1587 {
1588         int err = 0, ufd = attr->map_fd;
1589         struct bpf_map *map;
1590         struct fd f;
1591
1592         if (CHECK_ATTR(BPF_MAP_FREEZE))
1593                 return -EINVAL;
1594
1595         f = fdget(ufd);
1596         map = __bpf_map_get(f);
1597         if (IS_ERR(map))
1598                 return PTR_ERR(map);
1599
1600         if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
1601             map_value_has_timer(map)) {
1602                 fdput(f);
1603                 return -ENOTSUPP;
1604         }
1605
1606         mutex_lock(&map->freeze_mutex);
1607
1608         if (map->writecnt) {
1609                 err = -EBUSY;
1610                 goto err_put;
1611         }
1612         if (READ_ONCE(map->frozen)) {
1613                 err = -EBUSY;
1614                 goto err_put;
1615         }
1616         if (!bpf_capable()) {
1617                 err = -EPERM;
1618                 goto err_put;
1619         }
1620
1621         WRITE_ONCE(map->frozen, true);
1622 err_put:
1623         mutex_unlock(&map->freeze_mutex);
1624         fdput(f);
1625         return err;
1626 }
1627
1628 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1629 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1630         [_id] = & _name ## _prog_ops,
1631 #define BPF_MAP_TYPE(_id, _ops)
1632 #define BPF_LINK_TYPE(_id, _name)
1633 #include <linux/bpf_types.h>
1634 #undef BPF_PROG_TYPE
1635 #undef BPF_MAP_TYPE
1636 #undef BPF_LINK_TYPE
1637 };
1638
1639 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1640 {
1641         const struct bpf_prog_ops *ops;
1642
1643         if (type >= ARRAY_SIZE(bpf_prog_types))
1644                 return -EINVAL;
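        /* Clamp the index under speculation (Spectre v1 mitigation) before
         * using it to index bpf_prog_types[].
         */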
1645         type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1646         ops = bpf_prog_types[type];
1647         if (!ops)
1648                 return -EINVAL;
1649
1650         if (!bpf_prog_is_dev_bound(prog->aux))
1651                 prog->aux->ops = ops;
1652         else
1653                 prog->aux->ops = &bpf_offload_prog_ops;
1654         prog->type = type;
1655         return 0;
1656 }
1657
1658 enum bpf_audit {
1659         BPF_AUDIT_LOAD,
1660         BPF_AUDIT_UNLOAD,
1661         BPF_AUDIT_MAX,
1662 };
1663
1664 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1665         [BPF_AUDIT_LOAD]   = "LOAD",
1666         [BPF_AUDIT_UNLOAD] = "UNLOAD",
1667 };
1668
1669 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1670 {
1671         struct audit_context *ctx = NULL;
1672         struct audit_buffer *ab;
1673
1674         if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1675                 return;
1676         if (audit_enabled == AUDIT_OFF)
1677                 return;
1678         if (op == BPF_AUDIT_LOAD)
1679                 ctx = audit_context();
1680         ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1681         if (unlikely(!ab))
1682                 return;
1683         audit_log_format(ab, "prog-id=%u op=%s",
1684                          prog->aux->id, bpf_audit_str[op]);
1685         audit_log_end(ab);
1686 }
1687
1688 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1689 {
1690         int id;
1691
1692         idr_preload(GFP_KERNEL);
1693         spin_lock_bh(&prog_idr_lock);
1694         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1695         if (id > 0)
1696                 prog->aux->id = id;
1697         spin_unlock_bh(&prog_idr_lock);
1698         idr_preload_end();
1699
1700         /* id is in [1, INT_MAX) */
1701         if (WARN_ON_ONCE(!id))
1702                 return -ENOSPC;
1703
1704         return id > 0 ? 0 : id;
1705 }
1706
1707 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1708 {
1709         unsigned long flags;
1710
1711         /* cBPF to eBPF migrations are currently not in the idr store.
1712          * Offloaded programs are removed from the store when their device
1713          * disappears - even if someone grabs an fd to them they are unusable,
1714          * simply waiting for refcnt to drop to be freed.
1715          */
1716         if (!prog->aux->id)
1717                 return;
1718
1719         if (do_idr_lock)
1720                 spin_lock_irqsave(&prog_idr_lock, flags);
1721         else
1722                 __acquire(&prog_idr_lock);
1723
1724         idr_remove(&prog_idr, prog->aux->id);
1725         prog->aux->id = 0;
1726
1727         if (do_idr_lock)
1728                 spin_unlock_irqrestore(&prog_idr_lock, flags);
1729         else
1730                 __release(&prog_idr_lock);
1731 }
1732
1733 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1734 {
1735         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1736
1737         kvfree(aux->func_info);
1738         kfree(aux->func_info_aux);
1739         free_uid(aux->user);
1740         security_bpf_prog_free(aux);
1741         bpf_prog_free(aux->prog);
1742 }
1743
1744 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1745 {
1746         bpf_prog_kallsyms_del_all(prog);
1747         btf_put(prog->aux->btf);
1748         kvfree(prog->aux->jited_linfo);
1749         kvfree(prog->aux->linfo);
1750         kfree(prog->aux->kfunc_tab);
1751         if (prog->aux->attach_btf)
1752                 btf_put(prog->aux->attach_btf);
1753
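        /* Sleepable programs may still be running under the tasks-trace RCU
         * read lock, so their teardown must wait for a tasks-trace RCU grace
         * period rather than a regular one.
         */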
1754         if (deferred) {
1755                 if (prog->aux->sleepable)
1756                         call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1757                 else
1758                         call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1759         } else {
1760                 __bpf_prog_put_rcu(&prog->aux->rcu);
1761         }
1762 }
1763
1764 static void bpf_prog_put_deferred(struct work_struct *work)
1765 {
1766         struct bpf_prog_aux *aux;
1767         struct bpf_prog *prog;
1768
1769         aux = container_of(work, struct bpf_prog_aux, work);
1770         prog = aux->prog;
1771         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1772         bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1773         __bpf_prog_put_noref(prog, true);
1774 }
1775
1776 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1777 {
1778         struct bpf_prog_aux *aux = prog->aux;
1779
1780         if (atomic64_dec_and_test(&aux->refcnt)) {
1781                 /* bpf_prog_free_id() must be called first */
1782                 bpf_prog_free_id(prog, do_idr_lock);
1783
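                /* The remaining teardown can't run in hard IRQ context or
                 * with IRQs disabled, so punt it to a workqueue in that case.
                 */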
1784                 if (in_irq() || irqs_disabled()) {
1785                         INIT_WORK(&aux->work, bpf_prog_put_deferred);
1786                         schedule_work(&aux->work);
1787                 } else {
1788                         bpf_prog_put_deferred(&aux->work);
1789                 }
1790         }
1791 }
1792
1793 void bpf_prog_put(struct bpf_prog *prog)
1794 {
1795         __bpf_prog_put(prog, true);
1796 }
1797 EXPORT_SYMBOL_GPL(bpf_prog_put);
1798
1799 static int bpf_prog_release(struct inode *inode, struct file *filp)
1800 {
1801         struct bpf_prog *prog = filp->private_data;
1802
1803         bpf_prog_put(prog);
1804         return 0;
1805 }
1806
1807 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1808                                struct bpf_prog_stats *stats)
1809 {
1810         u64 nsecs = 0, cnt = 0, misses = 0;
1811         int cpu;
1812
1813         for_each_possible_cpu(cpu) {
1814                 const struct bpf_prog_stats *st;
1815                 unsigned int start;
1816                 u64 tnsecs, tcnt, tmisses;
1817
1818                 st = per_cpu_ptr(prog->stats, cpu);
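                /* Retry until a consistent snapshot of the 64-bit counters is
                 * read (the seqcount matters on 32-bit SMP kernels).
                 */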
1819                 do {
1820                         start = u64_stats_fetch_begin_irq(&st->syncp);
1821                         tnsecs = st->nsecs;
1822                         tcnt = st->cnt;
1823                         tmisses = st->misses;
1824                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1825                 nsecs += tnsecs;
1826                 cnt += tcnt;
1827                 misses += tmisses;
1828         }
1829         stats->nsecs = nsecs;
1830         stats->cnt = cnt;
1831         stats->misses = misses;
1832 }
1833
1834 #ifdef CONFIG_PROC_FS
1835 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1836 {
1837         const struct bpf_prog *prog = filp->private_data;
1838         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1839         struct bpf_prog_stats stats;
1840
1841         bpf_prog_get_stats(prog, &stats);
1842         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1843         seq_printf(m,
1844                    "prog_type:\t%u\n"
1845                    "prog_jited:\t%u\n"
1846                    "prog_tag:\t%s\n"
1847                    "memlock:\t%llu\n"
1848                    "prog_id:\t%u\n"
1849                    "run_time_ns:\t%llu\n"
1850                    "run_cnt:\t%llu\n"
1851                    "recursion_misses:\t%llu\n",
1852                    prog->type,
1853                    prog->jited,
1854                    prog_tag,
1855                    prog->pages * 1ULL << PAGE_SHIFT,
1856                    prog->aux->id,
1857                    stats.nsecs,
1858                    stats.cnt,
1859                    stats.misses);
1860 }
1861 #endif
1862
1863 const struct file_operations bpf_prog_fops = {
1864 #ifdef CONFIG_PROC_FS
1865         .show_fdinfo    = bpf_prog_show_fdinfo,
1866 #endif
1867         .release        = bpf_prog_release,
1868         .read           = bpf_dummy_read,
1869         .write          = bpf_dummy_write,
1870 };
1871
1872 int bpf_prog_new_fd(struct bpf_prog *prog)
1873 {
1874         int ret;
1875
1876         ret = security_bpf_prog(prog);
1877         if (ret < 0)
1878                 return ret;
1879
1880         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1881                                 O_RDWR | O_CLOEXEC);
1882 }
1883
1884 static struct bpf_prog *____bpf_prog_get(struct fd f)
1885 {
1886         if (!f.file)
1887                 return ERR_PTR(-EBADF);
1888         if (f.file->f_op != &bpf_prog_fops) {
1889                 fdput(f);
1890                 return ERR_PTR(-EINVAL);
1891         }
1892
1893         return f.file->private_data;
1894 }
1895
1896 void bpf_prog_add(struct bpf_prog *prog, int i)
1897 {
1898         atomic64_add(i, &prog->aux->refcnt);
1899 }
1900 EXPORT_SYMBOL_GPL(bpf_prog_add);
1901
1902 void bpf_prog_sub(struct bpf_prog *prog, int i)
1903 {
1904         /* Only to be used for undoing previous bpf_prog_add() in some
1905          * error path. We still know that another entity in our call
1906          * path holds a reference to the program, thus atomic_sub() can
1907          * be safely used in such cases!
1908          */
1909         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1910 }
1911 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1912
1913 void bpf_prog_inc(struct bpf_prog *prog)
1914 {
1915         atomic64_inc(&prog->aux->refcnt);
1916 }
1917 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1918
1919 /* prog_idr_lock should have been held */
1920 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1921 {
1922         int refold;
1923
1924         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1925
1926         if (!refold)
1927                 return ERR_PTR(-ENOENT);
1928
1929         return prog;
1930 }
1931 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1932
1933 bool bpf_prog_get_ok(struct bpf_prog *prog,
1934                             enum bpf_prog_type *attach_type, bool attach_drv)
1935 {
1936         /* not an attachment, just a refcount inc, always allow */
1937         if (!attach_type)
1938                 return true;
1939
1940         if (prog->type != *attach_type)
1941                 return false;
1942         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1943                 return false;
1944
1945         return true;
1946 }
1947
1948 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1949                                        bool attach_drv)
1950 {
1951         struct fd f = fdget(ufd);
1952         struct bpf_prog *prog;
1953
1954         prog = ____bpf_prog_get(f);
1955         if (IS_ERR(prog))
1956                 return prog;
1957         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1958                 prog = ERR_PTR(-EINVAL);
1959                 goto out;
1960         }
1961
1962         bpf_prog_inc(prog);
1963 out:
1964         fdput(f);
1965         return prog;
1966 }
1967
1968 struct bpf_prog *bpf_prog_get(u32 ufd)
1969 {
1970         return __bpf_prog_get(ufd, NULL, false);
1971 }
1972
1973 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1974                                        bool attach_drv)
1975 {
1976         return __bpf_prog_get(ufd, &type, attach_drv);
1977 }
1978 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1979
1980 /* Initially all BPF programs could be loaded w/o specifying
1981  * expected_attach_type. Later for some of them specifying expected_attach_type
1982  * at load time became required so that the program could be validated properly.
1983  * Programs of types that are allowed to be loaded both w/ and w/o (for
1984  * backward compatibility) expected_attach_type should have the default attach
1985  * type assigned to expected_attach_type for the latter case, so that it can be
1986  * validated later at attach time.
1987  *
1988  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if the
1989  * prog type requires it but the program was loaded without one, keeping such
1990  * loads backward compatible.
1991  */
1992 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1993 {
1994         switch (attr->prog_type) {
1995         case BPF_PROG_TYPE_CGROUP_SOCK:
1996                 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1997                  * exist so checking for non-zero is the way to go here.
1998                  */
1999                 if (!attr->expected_attach_type)
2000                         attr->expected_attach_type =
2001                                 BPF_CGROUP_INET_SOCK_CREATE;
2002                 break;
2003         case BPF_PROG_TYPE_SK_REUSEPORT:
2004                 if (!attr->expected_attach_type)
2005                         attr->expected_attach_type =
2006                                 BPF_SK_REUSEPORT_SELECT;
2007                 break;
2008         }
2009 }
2010
2011 static int
2012 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2013                            enum bpf_attach_type expected_attach_type,
2014                            struct btf *attach_btf, u32 btf_id,
2015                            struct bpf_prog *dst_prog)
2016 {
2017         if (btf_id) {
2018                 if (btf_id > BTF_MAX_TYPE)
2019                         return -EINVAL;
2020
2021                 if (!attach_btf && !dst_prog)
2022                         return -EINVAL;
2023
2024                 switch (prog_type) {
2025                 case BPF_PROG_TYPE_TRACING:
2026                 case BPF_PROG_TYPE_LSM:
2027                 case BPF_PROG_TYPE_STRUCT_OPS:
2028                 case BPF_PROG_TYPE_EXT:
2029                         break;
2030                 default:
2031                         return -EINVAL;
2032                 }
2033         }
2034
2035         if (attach_btf && (!btf_id || dst_prog))
2036                 return -EINVAL;
2037
2038         if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2039             prog_type != BPF_PROG_TYPE_EXT)
2040                 return -EINVAL;
2041
2042         switch (prog_type) {
2043         case BPF_PROG_TYPE_CGROUP_SOCK:
2044                 switch (expected_attach_type) {
2045                 case BPF_CGROUP_INET_SOCK_CREATE:
2046                 case BPF_CGROUP_INET_SOCK_RELEASE:
2047                 case BPF_CGROUP_INET4_POST_BIND:
2048                 case BPF_CGROUP_INET6_POST_BIND:
2049                         return 0;
2050                 default:
2051                         return -EINVAL;
2052                 }
2053         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2054                 switch (expected_attach_type) {
2055                 case BPF_CGROUP_INET4_BIND:
2056                 case BPF_CGROUP_INET6_BIND:
2057                 case BPF_CGROUP_INET4_CONNECT:
2058                 case BPF_CGROUP_INET6_CONNECT:
2059                 case BPF_CGROUP_INET4_GETPEERNAME:
2060                 case BPF_CGROUP_INET6_GETPEERNAME:
2061                 case BPF_CGROUP_INET4_GETSOCKNAME:
2062                 case BPF_CGROUP_INET6_GETSOCKNAME:
2063                 case BPF_CGROUP_UDP4_SENDMSG:
2064                 case BPF_CGROUP_UDP6_SENDMSG:
2065                 case BPF_CGROUP_UDP4_RECVMSG:
2066                 case BPF_CGROUP_UDP6_RECVMSG:
2067                         return 0;
2068                 default:
2069                         return -EINVAL;
2070                 }
2071         case BPF_PROG_TYPE_CGROUP_SKB:
2072                 switch (expected_attach_type) {
2073                 case BPF_CGROUP_INET_INGRESS:
2074                 case BPF_CGROUP_INET_EGRESS:
2075                         return 0;
2076                 default:
2077                         return -EINVAL;
2078                 }
2079         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2080                 switch (expected_attach_type) {
2081                 case BPF_CGROUP_SETSOCKOPT:
2082                 case BPF_CGROUP_GETSOCKOPT:
2083                         return 0;
2084                 default:
2085                         return -EINVAL;
2086                 }
2087         case BPF_PROG_TYPE_SK_LOOKUP:
2088                 if (expected_attach_type == BPF_SK_LOOKUP)
2089                         return 0;
2090                 return -EINVAL;
2091         case BPF_PROG_TYPE_SK_REUSEPORT:
2092                 switch (expected_attach_type) {
2093                 case BPF_SK_REUSEPORT_SELECT:
2094                 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2095                         return 0;
2096                 default:
2097                         return -EINVAL;
2098                 }
2099         case BPF_PROG_TYPE_SYSCALL:
2100         case BPF_PROG_TYPE_EXT:
2101                 if (expected_attach_type)
2102                         return -EINVAL;
2103                 fallthrough;
2104         default:
2105                 return 0;
2106         }
2107 }
2108
2109 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2110 {
2111         switch (prog_type) {
2112         case BPF_PROG_TYPE_SCHED_CLS:
2113         case BPF_PROG_TYPE_SCHED_ACT:
2114         case BPF_PROG_TYPE_XDP:
2115         case BPF_PROG_TYPE_LWT_IN:
2116         case BPF_PROG_TYPE_LWT_OUT:
2117         case BPF_PROG_TYPE_LWT_XMIT:
2118         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2119         case BPF_PROG_TYPE_SK_SKB:
2120         case BPF_PROG_TYPE_SK_MSG:
2121         case BPF_PROG_TYPE_LIRC_MODE2:
2122         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2123         case BPF_PROG_TYPE_CGROUP_DEVICE:
2124         case BPF_PROG_TYPE_CGROUP_SOCK:
2125         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2126         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2127         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2128         case BPF_PROG_TYPE_SOCK_OPS:
2129         case BPF_PROG_TYPE_EXT: /* extends any prog */
2130                 return true;
2131         case BPF_PROG_TYPE_CGROUP_SKB:
2132                 /* always unpriv */
2133         case BPF_PROG_TYPE_SK_REUSEPORT:
2134                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2135         default:
2136                 return false;
2137         }
2138 }
2139
2140 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2141 {
2142         switch (prog_type) {
2143         case BPF_PROG_TYPE_KPROBE:
2144         case BPF_PROG_TYPE_TRACEPOINT:
2145         case BPF_PROG_TYPE_PERF_EVENT:
2146         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2147         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2148         case BPF_PROG_TYPE_TRACING:
2149         case BPF_PROG_TYPE_LSM:
2150         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2151         case BPF_PROG_TYPE_EXT: /* extends any prog */
2152                 return true;
2153         default:
2154                 return false;
2155         }
2156 }
2157
2158 /* last field in 'union bpf_attr' used by this command */
2159 #define BPF_PROG_LOAD_LAST_FIELD fd_array
2160
2161 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
2162 {
2163         enum bpf_prog_type type = attr->prog_type;
2164         struct bpf_prog *prog, *dst_prog = NULL;
2165         struct btf *attach_btf = NULL;
2166         int err;
2167         char license[128];
2168         bool is_gpl;
2169
2170         if (CHECK_ATTR(BPF_PROG_LOAD))
2171                 return -EINVAL;
2172
2173         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2174                                  BPF_F_ANY_ALIGNMENT |
2175                                  BPF_F_TEST_STATE_FREQ |
2176                                  BPF_F_SLEEPABLE |
2177                                  BPF_F_TEST_RND_HI32))
2178                 return -EINVAL;
2179
2180         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2181             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2182             !bpf_capable())
2183                 return -EPERM;
2184
2185         /* copy eBPF program license from user space */
2186         if (strncpy_from_bpfptr(license,
2187                                 make_bpfptr(attr->license, uattr.is_kernel),
2188                                 sizeof(license) - 1) < 0)
2189                 return -EFAULT;
2190         license[sizeof(license) - 1] = 0;
2191
2192         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2193         is_gpl = license_is_gpl_compatible(license);
2194
2195         if (attr->insn_cnt == 0 ||
2196             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2197                 return -E2BIG;
2198         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2199             type != BPF_PROG_TYPE_CGROUP_SKB &&
2200             !bpf_capable())
2201                 return -EPERM;
2202
2203         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2204                 return -EPERM;
2205         if (is_perfmon_prog_type(type) && !perfmon_capable())
2206                 return -EPERM;
2207
2208         /* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a bpf_prog
2209          * or a btf object; we need to check which one it is.
2210          */
2211         if (attr->attach_prog_fd) {
2212                 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2213                 if (IS_ERR(dst_prog)) {
2214                         dst_prog = NULL;
2215                         attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2216                         if (IS_ERR(attach_btf))
2217                                 return -EINVAL;
2218                         if (!btf_is_kernel(attach_btf)) {
2219                                 /* attaching through specifying bpf_prog's BTF
2220                                  * objects directly might be supported eventually
2221                                  */
2222                                 btf_put(attach_btf);
2223                                 return -ENOTSUPP;
2224                         }
2225                 }
2226         } else if (attr->attach_btf_id) {
2227                 /* fall back to vmlinux BTF, if BTF type ID is specified */
2228                 attach_btf = bpf_get_btf_vmlinux();
2229                 if (IS_ERR(attach_btf))
2230                         return PTR_ERR(attach_btf);
2231                 if (!attach_btf)
2232                         return -EINVAL;
2233                 btf_get(attach_btf);
2234         }
2235
2236         bpf_prog_load_fixup_attach_type(attr);
2237         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2238                                        attach_btf, attr->attach_btf_id,
2239                                        dst_prog)) {
2240                 if (dst_prog)
2241                         bpf_prog_put(dst_prog);
2242                 if (attach_btf)
2243                         btf_put(attach_btf);
2244                 return -EINVAL;
2245         }
2246
2247         /* plain bpf_prog allocation */
2248         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2249         if (!prog) {
2250                 if (dst_prog)
2251                         bpf_prog_put(dst_prog);
2252                 if (attach_btf)
2253                         btf_put(attach_btf);
2254                 return -ENOMEM;
2255         }
2256
2257         prog->expected_attach_type = attr->expected_attach_type;
2258         prog->aux->attach_btf = attach_btf;
2259         prog->aux->attach_btf_id = attr->attach_btf_id;
2260         prog->aux->dst_prog = dst_prog;
2261         prog->aux->offload_requested = !!attr->prog_ifindex;
2262         prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2263
2264         err = security_bpf_prog_alloc(prog->aux);
2265         if (err)
2266                 goto free_prog;
2267
2268         prog->aux->user = get_current_user();
2269         prog->len = attr->insn_cnt;
2270
2271         err = -EFAULT;
2272         if (copy_from_bpfptr(prog->insns,
2273                              make_bpfptr(attr->insns, uattr.is_kernel),
2274                              bpf_prog_insn_size(prog)) != 0)
2275                 goto free_prog_sec;
2276
2277         prog->orig_prog = NULL;
2278         prog->jited = 0;
2279
2280         atomic64_set(&prog->aux->refcnt, 1);
2281         prog->gpl_compatible = is_gpl ? 1 : 0;
2282
2283         if (bpf_prog_is_dev_bound(prog->aux)) {
2284                 err = bpf_prog_offload_init(prog, attr);
2285                 if (err)
2286                         goto free_prog_sec;
2287         }
2288
2289         /* find program type: socket_filter vs tracing_filter */
2290         err = find_prog_type(type, prog);
2291         if (err < 0)
2292                 goto free_prog_sec;
2293
2294         prog->aux->load_time = ktime_get_boottime_ns();
2295         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2296                                sizeof(attr->prog_name));
2297         if (err < 0)
2298                 goto free_prog_sec;
2299
2300         /* run eBPF verifier */
2301         err = bpf_check(&prog, attr, uattr);
2302         if (err < 0)
2303                 goto free_used_maps;
2304
2305         prog = bpf_prog_select_runtime(prog, &err);
2306         if (err < 0)
2307                 goto free_used_maps;
2308
2309         err = bpf_prog_alloc_id(prog);
2310         if (err)
2311                 goto free_used_maps;
2312
2313         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2314          * effectively publicly exposed. However, retrieving via
2315          * bpf_prog_get_fd_by_id() will take another reference,
2316          * therefore it cannot be gone underneath us.
2317          *
2318          * Only for the time /after/ successful bpf_prog_new_fd()
2319          * and before returning to userspace, we might just hold
2320          * one reference and any parallel close on that fd could
2321          * rip everything out. Hence, below notifications must
2322          * happen before bpf_prog_new_fd().
2323          *
2324          * Also, any failure handling from this point onwards must
2325          * be using bpf_prog_put() given the program is exposed.
2326          */
2327         bpf_prog_kallsyms_add(prog);
2328         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2329         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2330
2331         err = bpf_prog_new_fd(prog);
2332         if (err < 0)
2333                 bpf_prog_put(prog);
2334         return err;
2335
2336 free_used_maps:
2337         /* In case we have subprogs, we need to wait for a grace
2338          * period before we can tear down JIT memory since symbols
2339          * are already exposed under kallsyms.
2340          */
2341         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2342         return err;
2343 free_prog_sec:
2344         free_uid(prog->aux->user);
2345         security_bpf_prog_free(prog->aux);
2346 free_prog:
2347         if (prog->aux->attach_btf)
2348                 btf_put(prog->aux->attach_btf);
2349         bpf_prog_free(prog);
2350         return err;
2351 }
2352
2353 #define BPF_OBJ_LAST_FIELD file_flags
2354
2355 static int bpf_obj_pin(const union bpf_attr *attr)
2356 {
2357         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2358                 return -EINVAL;
2359
2360         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2361 }
2362
2363 static int bpf_obj_get(const union bpf_attr *attr)
2364 {
2365         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2366             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2367                 return -EINVAL;
2368
2369         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2370                                 attr->file_flags);
2371 }
2372
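/* Initialize a freshly allocated bpf_link: the caller holds the single
 * reference; the ID stays 0 until bpf_link_prime() + bpf_link_settle()
 * assign and publish one.
 */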
2373 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2374                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2375 {
2376         atomic64_set(&link->refcnt, 1);
2377         link->type = type;
2378         link->id = 0;
2379         link->ops = ops;
2380         link->prog = prog;
2381 }
2382
2383 static void bpf_link_free_id(int id)
2384 {
2385         if (!id)
2386                 return;
2387
2388         spin_lock_bh(&link_idr_lock);
2389         idr_remove(&link_idr, id);
2390         spin_unlock_bh(&link_idr_lock);
2391 }
2392
2393 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2394  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2395  * anon_inode's release() call. This helper marks bpf_link as
2396  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2397  * is not decremented; it's the responsibility of the calling code that failed
2398  * to complete bpf_link initialization.
2399  */
2400 void bpf_link_cleanup(struct bpf_link_primer *primer)
2401 {
2402         primer->link->prog = NULL;
2403         bpf_link_free_id(primer->id);
2404         fput(primer->file);
2405         put_unused_fd(primer->fd);
2406 }
2407
2408 void bpf_link_inc(struct bpf_link *link)
2409 {
2410         atomic64_inc(&link->refcnt);
2411 }
2412
2413 /* bpf_link_free is guaranteed to be called from process context */
2414 static void bpf_link_free(struct bpf_link *link)
2415 {
2416         bpf_link_free_id(link->id);
2417         if (link->prog) {
2418                 /* detach BPF program, clean up used resources */
2419                 link->ops->release(link);
2420                 bpf_prog_put(link->prog);
2421         }
2422         /* free bpf_link and its containing memory */
2423         link->ops->dealloc(link);
2424 }
2425
2426 static void bpf_link_put_deferred(struct work_struct *work)
2427 {
2428         struct bpf_link *link = container_of(work, struct bpf_link, work);
2429
2430         bpf_link_free(link);
2431 }
2432
2433 /* bpf_link_put can be called from atomic context, but ensures that resources
2434  * are freed from process context
2435  */
2436 void bpf_link_put(struct bpf_link *link)
2437 {
2438         if (!atomic64_dec_and_test(&link->refcnt))
2439                 return;
2440
2441         if (in_atomic()) {
2442                 INIT_WORK(&link->work, bpf_link_put_deferred);
2443                 schedule_work(&link->work);
2444         } else {
2445                 bpf_link_free(link);
2446         }
2447 }
2448
2449 static int bpf_link_release(struct inode *inode, struct file *filp)
2450 {
2451         struct bpf_link *link = filp->private_data;
2452
2453         bpf_link_put(link);
2454         return 0;
2455 }
2456
2457 #ifdef CONFIG_PROC_FS
2458 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2459 #define BPF_MAP_TYPE(_id, _ops)
2460 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2461 static const char *bpf_link_type_strs[] = {
2462         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2463 #include <linux/bpf_types.h>
2464 };
2465 #undef BPF_PROG_TYPE
2466 #undef BPF_MAP_TYPE
2467 #undef BPF_LINK_TYPE
2468
2469 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2470 {
2471         const struct bpf_link *link = filp->private_data;
2472         const struct bpf_prog *prog = link->prog;
2473         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2474
2475         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2476         seq_printf(m,
2477                    "link_type:\t%s\n"
2478                    "link_id:\t%u\n"
2479                    "prog_tag:\t%s\n"
2480                    "prog_id:\t%u\n",
2481                    bpf_link_type_strs[link->type],
2482                    link->id,
2483                    prog_tag,
2484                    prog->aux->id);
2485         if (link->ops->show_fdinfo)
2486                 link->ops->show_fdinfo(link, m);
2487 }
2488 #endif
2489
2490 static const struct file_operations bpf_link_fops = {
2491 #ifdef CONFIG_PROC_FS
2492         .show_fdinfo    = bpf_link_show_fdinfo,
2493 #endif
2494         .release        = bpf_link_release,
2495         .read           = bpf_dummy_read,
2496         .write          = bpf_dummy_write,
2497 };
2498
2499 static int bpf_link_alloc_id(struct bpf_link *link)
2500 {
2501         int id;
2502
2503         idr_preload(GFP_KERNEL);
2504         spin_lock_bh(&link_idr_lock);
2505         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2506         spin_unlock_bh(&link_idr_lock);
2507         idr_preload_end();
2508
2509         return id;
2510 }
2511
2512 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2513  * reserving an unused FD and allocating an ID from link_idr. This is to be
2514  * paired with bpf_link_settle() to install the FD and ID and expose bpf_link
2515  * to user-space, if bpf_link is successfully attached. If not, bpf_link and
2516  * the pre-allocated resources are to be freed with a bpf_link_cleanup() call.
2517  * All the transient state is passed around in struct bpf_link_primer.
2518  * This is the preferred way to create and initialize bpf_link, especially when
2519  * there are complicated and expensive operations in between creating bpf_link
2520  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2521  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2522  * expensive (and potentially failing) rollback operations in the rare case
2523  * that the file, FD, or ID can't be allocated.
2524  */
2525 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2526 {
2527         struct file *file;
2528         int fd, id;
2529
2530         fd = get_unused_fd_flags(O_CLOEXEC);
2531         if (fd < 0)
2532                 return fd;
2533
2535         id = bpf_link_alloc_id(link);
2536         if (id < 0) {
2537                 put_unused_fd(fd);
2538                 return id;
2539         }
2540
2541         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2542         if (IS_ERR(file)) {
2543                 bpf_link_free_id(id);
2544                 put_unused_fd(fd);
2545                 return PTR_ERR(file);
2546         }
2547
2548         primer->link = link;
2549         primer->file = file;
2550         primer->fd = fd;
2551         primer->id = id;
2552         return 0;
2553 }
2554
2555 int bpf_link_settle(struct bpf_link_primer *primer)
2556 {
2557         /* make bpf_link fetchable by ID */
2558         spin_lock_bh(&link_idr_lock);
2559         primer->link->id = primer->id;
2560         spin_unlock_bh(&link_idr_lock);
2561         /* make bpf_link fetchable by FD */
2562         fd_install(primer->fd, primer->file);
2563         /* pass through installed FD */
2564         return primer->fd;
2565 }
2566
2567 int bpf_link_new_fd(struct bpf_link *link)
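/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical link type with ops "my_link_lops" and an attach helper
 * "my_attach_to_hook" would typically pair bpf_link_prime() and
 * bpf_link_settle() like this, mirroring bpf_raw_tracepoint_open() below:
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &my_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// nothing exposed yet
 *		return err;
 *	}
 *	err = my_attach_to_hook(link);		// hypothetical attach step
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// frees file/FD/ID, not prog's refcnt
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// installs FD, publishes ID
 */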
2568 {
2569         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2570 }
2571
2572 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2573 {
2574         struct fd f = fdget(ufd);
2575         struct bpf_link *link;
2576
2577         if (!f.file)
2578                 return ERR_PTR(-EBADF);
2579         if (f.file->f_op != &bpf_link_fops) {
2580                 fdput(f);
2581                 return ERR_PTR(-EINVAL);
2582         }
2583
2584         link = f.file->private_data;
2585         bpf_link_inc(link);
2586         fdput(f);
2587
2588         return link;
2589 }
2590
2591 struct bpf_tracing_link {
2592         struct bpf_link link;
2593         enum bpf_attach_type attach_type;
2594         struct bpf_trampoline *trampoline;
2595         struct bpf_prog *tgt_prog;
2596 };
2597
2598 static void bpf_tracing_link_release(struct bpf_link *link)
2599 {
2600         struct bpf_tracing_link *tr_link =
2601                 container_of(link, struct bpf_tracing_link, link);
2602
2603         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2604                                                 tr_link->trampoline));
2605
2606         bpf_trampoline_put(tr_link->trampoline);
2607
2608         /* tgt_prog is NULL if target is a kernel function */
2609         if (tr_link->tgt_prog)
2610                 bpf_prog_put(tr_link->tgt_prog);
2611 }
2612
2613 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2614 {
2615         struct bpf_tracing_link *tr_link =
2616                 container_of(link, struct bpf_tracing_link, link);
2617
2618         kfree(tr_link);
2619 }
2620
2621 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2622                                          struct seq_file *seq)
2623 {
2624         struct bpf_tracing_link *tr_link =
2625                 container_of(link, struct bpf_tracing_link, link);
2626
2627         seq_printf(seq,
2628                    "attach_type:\t%d\n",
2629                    tr_link->attach_type);
2630 }
2631
2632 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2633                                            struct bpf_link_info *info)
2634 {
2635         struct bpf_tracing_link *tr_link =
2636                 container_of(link, struct bpf_tracing_link, link);
2637
2638         info->tracing.attach_type = tr_link->attach_type;
2639         bpf_trampoline_unpack_key(tr_link->trampoline->key,
2640                                   &info->tracing.target_obj_id,
2641                                   &info->tracing.target_btf_id);
2642
2643         return 0;
2644 }
2645
2646 static const struct bpf_link_ops bpf_tracing_link_lops = {
2647         .release = bpf_tracing_link_release,
2648         .dealloc = bpf_tracing_link_dealloc,
2649         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2650         .fill_link_info = bpf_tracing_link_fill_link_info,
2651 };
2652
2653 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2654                                    int tgt_prog_fd,
2655                                    u32 btf_id)
2656 {
2657         struct bpf_link_primer link_primer;
2658         struct bpf_prog *tgt_prog = NULL;
2659         struct bpf_trampoline *tr = NULL;
2660         struct bpf_tracing_link *link;
2661         u64 key = 0;
2662         int err;
2663
2664         switch (prog->type) {
2665         case BPF_PROG_TYPE_TRACING:
2666                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2667                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2668                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2669                         err = -EINVAL;
2670                         goto out_put_prog;
2671                 }
2672                 break;
2673         case BPF_PROG_TYPE_EXT:
2674                 if (prog->expected_attach_type != 0) {
2675                         err = -EINVAL;
2676                         goto out_put_prog;
2677                 }
2678                 break;
2679         case BPF_PROG_TYPE_LSM:
2680                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2681                         err = -EINVAL;
2682                         goto out_put_prog;
2683                 }
2684                 break;
2685         default:
2686                 err = -EINVAL;
2687                 goto out_put_prog;
2688         }
2689
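        /* tgt_prog_fd and btf_id must be supplied together or not at all */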
2690         if (!!tgt_prog_fd != !!btf_id) {
2691                 err = -EINVAL;
2692                 goto out_put_prog;
2693         }
2694
2695         if (tgt_prog_fd) {
2696                 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2697                 if (prog->type != BPF_PROG_TYPE_EXT) {
2698                         err = -EINVAL;
2699                         goto out_put_prog;
2700                 }
2701
2702                 tgt_prog = bpf_prog_get(tgt_prog_fd);
2703                 if (IS_ERR(tgt_prog)) {
2704                         err = PTR_ERR(tgt_prog);
2705                         tgt_prog = NULL;
2706                         goto out_put_prog;
2707                 }
2708
2709                 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2710         }
2711
2712         link = kzalloc(sizeof(*link), GFP_USER);
2713         if (!link) {
2714                 err = -ENOMEM;
2715                 goto out_put_prog;
2716         }
2717         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2718                       &bpf_tracing_link_lops, prog);
2719         link->attach_type = prog->expected_attach_type;
2720
2721         mutex_lock(&prog->aux->dst_mutex);
2722
2723         /* There are a few possible cases here:
2724          *
2725          * - if prog->aux->dst_trampoline is set, the program was just loaded
2726          *   and not yet attached to anything, so we can use the values stored
2727          *   in prog->aux
2728          *
2729          * - if prog->aux->dst_trampoline is NULL, the program has already been
2730          *   attached to a target and its initial target was cleared (below)
2731          *
2732          * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2733          *   target_btf_id using the link_create API.
2734          *
2735          * - if tgt_prog == NULL, this function was called using the old
2736          *   raw_tracepoint_open API and we need a target from prog->aux
2737          *
2738          * - if both prog->aux->dst_trampoline and tgt_prog are NULL, the
2739          *   program was detached and is going for re-attachment.
2740          */
2741         if (!prog->aux->dst_trampoline && !tgt_prog) {
2742                 /*
2743                  * Allow re-attach for TRACING and LSM programs. If it's
2744                  * currently linked, bpf_trampoline_link_prog will fail.
2745                  * EXT programs need to specify tgt_prog_fd, so they
2746                  * re-attach in a separate code path.
2747                  */
2748                 if (prog->type != BPF_PROG_TYPE_TRACING &&
2749                     prog->type != BPF_PROG_TYPE_LSM) {
2750                         err = -EINVAL;
2751                         goto out_unlock;
2752                 }
2753                 btf_id = prog->aux->attach_btf_id;
2754                 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
2755         }
2756
2757         if (!prog->aux->dst_trampoline ||
2758             (key && key != prog->aux->dst_trampoline->key)) {
2759                 /* If there is no saved target, or the specified target is
2760                  * different from the destination specified at load time, we
2761                  * need a new trampoline and a check for compatibility
2762                  */
2763                 struct bpf_attach_target_info tgt_info = {};
2764
2765                 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2766                                               &tgt_info);
2767                 if (err)
2768                         goto out_unlock;
2769
2770                 tr = bpf_trampoline_get(key, &tgt_info);
2771                 if (!tr) {
2772                         err = -ENOMEM;
2773                         goto out_unlock;
2774                 }
2775         } else {
2776                 /* The caller didn't specify a target, or the target was the
2777                  * same as the destination supplied during program load. This
2778                  * means we can reuse the trampoline and reference from program
2779                  * load time, and there is no need to allocate a new one. This
2780                  * can only happen once for any program, as the saved values in
2781                  * prog->aux are cleared below.
2782                  */
2783                 tr = prog->aux->dst_trampoline;
2784                 tgt_prog = prog->aux->dst_prog;
2785         }
2786
2787         err = bpf_link_prime(&link->link, &link_primer);
2788         if (err)
2789                 goto out_unlock;
2790
2791         err = bpf_trampoline_link_prog(prog, tr);
2792         if (err) {
2793                 bpf_link_cleanup(&link_primer);
2794                 link = NULL;
2795                 goto out_unlock;
2796         }
2797
2798         link->tgt_prog = tgt_prog;
2799         link->trampoline = tr;
2800
2801         /* Always clear the trampoline and target prog from prog->aux to make
2802          * sure the original attach destination is not kept alive after a
2803          * program is (re-)attached to another target.
2804          */
2805         if (prog->aux->dst_prog &&
2806             (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2807                 /* got extra prog ref from syscall, or attaching to different prog */
2808                 bpf_prog_put(prog->aux->dst_prog);
2809         if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2810                 /* we allocated a new trampoline, so free the old one */
2811                 bpf_trampoline_put(prog->aux->dst_trampoline);
2812
2813         prog->aux->dst_prog = NULL;
2814         prog->aux->dst_trampoline = NULL;
2815         mutex_unlock(&prog->aux->dst_mutex);
2816
2817         return bpf_link_settle(&link_primer);
2818 out_unlock:
2819         if (tr && tr != prog->aux->dst_trampoline)
2820                 bpf_trampoline_put(tr);
2821         mutex_unlock(&prog->aux->dst_mutex);
2822         kfree(link);
2823 out_put_prog:
2824         if (tgt_prog_fd && tgt_prog)
2825                 bpf_prog_put(tgt_prog);
2826         return err;
2827 }
2828
2829 struct bpf_raw_tp_link {
2830         struct bpf_link link;
2831         struct bpf_raw_event_map *btp;
2832 };
2833
2834 static void bpf_raw_tp_link_release(struct bpf_link *link)
2835 {
2836         struct bpf_raw_tp_link *raw_tp =
2837                 container_of(link, struct bpf_raw_tp_link, link);
2838
2839         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2840         bpf_put_raw_tracepoint(raw_tp->btp);
2841 }
2842
2843 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2844 {
2845         struct bpf_raw_tp_link *raw_tp =
2846                 container_of(link, struct bpf_raw_tp_link, link);
2847
2848         kfree(raw_tp);
2849 }
2850
2851 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2852                                         struct seq_file *seq)
2853 {
2854         struct bpf_raw_tp_link *raw_tp_link =
2855                 container_of(link, struct bpf_raw_tp_link, link);
2856
2857         seq_printf(seq,
2858                    "tp_name:\t%s\n",
2859                    raw_tp_link->btp->tp->name);
2860 }
2861
2862 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2863                                           struct bpf_link_info *info)
2864 {
2865         struct bpf_raw_tp_link *raw_tp_link =
2866                 container_of(link, struct bpf_raw_tp_link, link);
2867         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2868         const char *tp_name = raw_tp_link->btp->tp->name;
2869         u32 ulen = info->raw_tracepoint.tp_name_len;
2870         size_t tp_len = strlen(tp_name);
2871
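        /* ubuf and ulen must be either both set or both zero */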
2872         if (!ulen ^ !ubuf)
2873                 return -EINVAL;
2874
2875         info->raw_tracepoint.tp_name_len = tp_len + 1;
2876
2877         if (!ubuf)
2878                 return 0;
2879
2880         if (ulen >= tp_len + 1) {
2881                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2882                         return -EFAULT;
2883         } else {
2884                 char zero = '\0';
2885
2886                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2887                         return -EFAULT;
2888                 if (put_user(zero, ubuf + ulen - 1))
2889                         return -EFAULT;
2890                 return -ENOSPC;
2891         }
2892
2893         return 0;
2894 }
2895
2896 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2897         .release = bpf_raw_tp_link_release,
2898         .dealloc = bpf_raw_tp_link_dealloc,
2899         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2900         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2901 };
2902
2903 #ifdef CONFIG_PERF_EVENTS
2904 struct bpf_perf_link {
2905         struct bpf_link link;
2906         struct file *perf_file;
2907 };
2908
2909 static void bpf_perf_link_release(struct bpf_link *link)
2910 {
2911         struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
2912         struct perf_event *event = perf_link->perf_file->private_data;
2913
2914         perf_event_free_bpf_prog(event);
2915         fput(perf_link->perf_file);
2916 }
2917
2918 static void bpf_perf_link_dealloc(struct bpf_link *link)
2919 {
2920         struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
2921
2922         kfree(perf_link);
2923 }
2924
2925 static const struct bpf_link_ops bpf_perf_link_lops = {
2926         .release = bpf_perf_link_release,
2927         .dealloc = bpf_perf_link_dealloc,
2928 };
2929
2930 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2931 {
2932         struct bpf_link_primer link_primer;
2933         struct bpf_perf_link *link;
2934         struct perf_event *event;
2935         struct file *perf_file;
2936         int err;
2937
2938         if (attr->link_create.flags)
2939                 return -EINVAL;
2940
2941         perf_file = perf_event_get(attr->link_create.target_fd);
2942         if (IS_ERR(perf_file))
2943                 return PTR_ERR(perf_file);
2944
2945         link = kzalloc(sizeof(*link), GFP_USER);
2946         if (!link) {
2947                 err = -ENOMEM;
2948                 goto out_put_file;
2949         }
2950         bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
2951         link->perf_file = perf_file;
2952
2953         err = bpf_link_prime(&link->link, &link_primer);
2954         if (err) {
2955                 kfree(link);
2956                 goto out_put_file;
2957         }
2958
2959         event = perf_file->private_data;
2960         err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
2961         if (err) {
2962                 bpf_link_cleanup(&link_primer);
2963                 goto out_put_file;
2964         }
2965         /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
2966         bpf_prog_inc(prog);
2967
2968         return bpf_link_settle(&link_primer);
2969
2970 out_put_file:
2971         fput(perf_file);
2972         return err;
2973 }
2974 #endif /* CONFIG_PERF_EVENTS */
2975
2976 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2977
2978 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2979 {
2980         struct bpf_link_primer link_primer;
2981         struct bpf_raw_tp_link *link;
2982         struct bpf_raw_event_map *btp;
2983         struct bpf_prog *prog;
2984         const char *tp_name;
2985         char buf[128];
2986         int err;
2987
2988         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2989                 return -EINVAL;
2990
2991         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2992         if (IS_ERR(prog))
2993                 return PTR_ERR(prog);
2994
2995         switch (prog->type) {
2996         case BPF_PROG_TYPE_TRACING:
2997         case BPF_PROG_TYPE_EXT:
2998         case BPF_PROG_TYPE_LSM:
2999                 if (attr->raw_tracepoint.name) {
3000                         /* The attach point for this category of programs
3001                          * should be specified via btf_id during program load.
3002                          */
3003                         err = -EINVAL;
3004                         goto out_put_prog;
3005                 }
3006                 if (prog->type == BPF_PROG_TYPE_TRACING &&
3007                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3008                         tp_name = prog->aux->attach_func_name;
3009                         break;
3010                 }
3011                 err = bpf_tracing_prog_attach(prog, 0, 0);
3012                 if (err >= 0)
3013                         return err;
3014                 goto out_put_prog;
3015         case BPF_PROG_TYPE_RAW_TRACEPOINT:
3016         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3017                 if (strncpy_from_user(buf,
3018                                       u64_to_user_ptr(attr->raw_tracepoint.name),
3019                                       sizeof(buf) - 1) < 0) {
3020                         err = -EFAULT;
3021                         goto out_put_prog;
3022                 }
3023                 buf[sizeof(buf) - 1] = 0;
3024                 tp_name = buf;
3025                 break;
3026         default:
3027                 err = -EINVAL;
3028                 goto out_put_prog;
3029         }
3030
3031         btp = bpf_get_raw_tracepoint(tp_name);
3032         if (!btp) {
3033                 err = -ENOENT;
3034                 goto out_put_prog;
3035         }
3036
3037         link = kzalloc(sizeof(*link), GFP_USER);
3038         if (!link) {
3039                 err = -ENOMEM;
3040                 goto out_put_btp;
3041         }
3042         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3043                       &bpf_raw_tp_link_lops, prog);
3044         link->btp = btp;
3045
3046         err = bpf_link_prime(&link->link, &link_primer);
3047         if (err) {
3048                 kfree(link);
3049                 goto out_put_btp;
3050         }
3051
3052         err = bpf_probe_register(link->btp, prog);
3053         if (err) {
3054                 bpf_link_cleanup(&link_primer);
3055                 goto out_put_btp;
3056         }
3057
3058         return bpf_link_settle(&link_primer);
3059
3060 out_put_btp:
3061         bpf_put_raw_tracepoint(btp);
3062 out_put_prog:
3063         bpf_prog_put(prog);
3064         return err;
3065 }
3066
3067 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3068                                              enum bpf_attach_type attach_type)
3069 {
3070         switch (prog->type) {
3071         case BPF_PROG_TYPE_CGROUP_SOCK:
3072         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3073         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3074         case BPF_PROG_TYPE_SK_LOOKUP:
3075                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3076         case BPF_PROG_TYPE_CGROUP_SKB:
3077                 if (!capable(CAP_NET_ADMIN))
3078                         /* cg-skb progs can be loaded by an unprivileged user.
3079                          * Check permissions at attach time.
3080                          */
3081                         return -EPERM;
3082                 return prog->enforce_expected_attach_type &&
3083                         prog->expected_attach_type != attach_type ?
3084                         -EINVAL : 0;
3085         default:
3086                 return 0;
3087         }
3088 }
3089
3090 static enum bpf_prog_type
3091 attach_type_to_prog_type(enum bpf_attach_type attach_type)
3092 {
3093         switch (attach_type) {
3094         case BPF_CGROUP_INET_INGRESS:
3095         case BPF_CGROUP_INET_EGRESS:
3096                 return BPF_PROG_TYPE_CGROUP_SKB;
3097         case BPF_CGROUP_INET_SOCK_CREATE:
3098         case BPF_CGROUP_INET_SOCK_RELEASE:
3099         case BPF_CGROUP_INET4_POST_BIND:
3100         case BPF_CGROUP_INET6_POST_BIND:
3101                 return BPF_PROG_TYPE_CGROUP_SOCK;
3102         case BPF_CGROUP_INET4_BIND:
3103         case BPF_CGROUP_INET6_BIND:
3104         case BPF_CGROUP_INET4_CONNECT:
3105         case BPF_CGROUP_INET6_CONNECT:
3106         case BPF_CGROUP_INET4_GETPEERNAME:
3107         case BPF_CGROUP_INET6_GETPEERNAME:
3108         case BPF_CGROUP_INET4_GETSOCKNAME:
3109         case BPF_CGROUP_INET6_GETSOCKNAME:
3110         case BPF_CGROUP_UDP4_SENDMSG:
3111         case BPF_CGROUP_UDP6_SENDMSG:
3112         case BPF_CGROUP_UDP4_RECVMSG:
3113         case BPF_CGROUP_UDP6_RECVMSG:
3114                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3115         case BPF_CGROUP_SOCK_OPS:
3116                 return BPF_PROG_TYPE_SOCK_OPS;
3117         case BPF_CGROUP_DEVICE:
3118                 return BPF_PROG_TYPE_CGROUP_DEVICE;
3119         case BPF_SK_MSG_VERDICT:
3120                 return BPF_PROG_TYPE_SK_MSG;
3121         case BPF_SK_SKB_STREAM_PARSER:
3122         case BPF_SK_SKB_STREAM_VERDICT:
3123         case BPF_SK_SKB_VERDICT:
3124                 return BPF_PROG_TYPE_SK_SKB;
3125         case BPF_LIRC_MODE2:
3126                 return BPF_PROG_TYPE_LIRC_MODE2;
3127         case BPF_FLOW_DISSECTOR:
3128                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
3129         case BPF_CGROUP_SYSCTL:
3130                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
3131         case BPF_CGROUP_GETSOCKOPT:
3132         case BPF_CGROUP_SETSOCKOPT:
3133                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3134         case BPF_TRACE_ITER:
3135                 return BPF_PROG_TYPE_TRACING;
3136         case BPF_SK_LOOKUP:
3137                 return BPF_PROG_TYPE_SK_LOOKUP;
3138         case BPF_XDP:
3139                 return BPF_PROG_TYPE_XDP;
3140         default:
3141                 return BPF_PROG_TYPE_UNSPEC;
3142         }
3143 }
3144
3145 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
3146
3147 #define BPF_F_ATTACH_MASK \
3148         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
3149
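/* BPF_PROG_ATTACH: take a reference on the program fd, validate the
 * attach type, then hand off to the sockmap, lirc, netns (flow
 * dissector) or cgroup attach path based on the program type.  A
 * minimal user-space invocation might look roughly like this sketch
 * (cgroup_fd and prog_fd are assumed to be valid fds):
 *
 *	union bpf_attr attr = {};
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */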
3150 static int bpf_prog_attach(const union bpf_attr *attr)
3151 {
3152         enum bpf_prog_type ptype;
3153         struct bpf_prog *prog;
3154         int ret;
3155
3156         if (CHECK_ATTR(BPF_PROG_ATTACH))
3157                 return -EINVAL;
3158
3159         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3160                 return -EINVAL;
3161
3162         ptype = attach_type_to_prog_type(attr->attach_type);
3163         if (ptype == BPF_PROG_TYPE_UNSPEC)
3164                 return -EINVAL;
3165
3166         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3167         if (IS_ERR(prog))
3168                 return PTR_ERR(prog);
3169
3170         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3171                 bpf_prog_put(prog);
3172                 return -EINVAL;
3173         }
3174
3175         switch (ptype) {
3176         case BPF_PROG_TYPE_SK_SKB:
3177         case BPF_PROG_TYPE_SK_MSG:
3178                 ret = sock_map_get_from_fd(attr, prog);
3179                 break;
3180         case BPF_PROG_TYPE_LIRC_MODE2:
3181                 ret = lirc_prog_attach(attr, prog);
3182                 break;
3183         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3184                 ret = netns_bpf_prog_attach(attr, prog);
3185                 break;
3186         case BPF_PROG_TYPE_CGROUP_DEVICE:
3187         case BPF_PROG_TYPE_CGROUP_SKB:
3188         case BPF_PROG_TYPE_CGROUP_SOCK:
3189         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3190         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3191         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3192         case BPF_PROG_TYPE_SOCK_OPS:
3193                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3194                 break;
3195         default:
3196                 ret = -EINVAL;
3197         }
3198
3199         if (ret)
3200                 bpf_prog_put(prog);
3201         return ret;
3202 }
3203
3204 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3205
3206 static int bpf_prog_detach(const union bpf_attr *attr)
3207 {
3208         enum bpf_prog_type ptype;
3209
3210         if (CHECK_ATTR(BPF_PROG_DETACH))
3211                 return -EINVAL;
3212
3213         ptype = attach_type_to_prog_type(attr->attach_type);
3214
3215         switch (ptype) {
3216         case BPF_PROG_TYPE_SK_MSG:
3217         case BPF_PROG_TYPE_SK_SKB:
3218                 return sock_map_prog_detach(attr, ptype);
3219         case BPF_PROG_TYPE_LIRC_MODE2:
3220                 return lirc_prog_detach(attr);
3221         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3222                 return netns_bpf_prog_detach(attr, ptype);
3223         case BPF_PROG_TYPE_CGROUP_DEVICE:
3224         case BPF_PROG_TYPE_CGROUP_SKB:
3225         case BPF_PROG_TYPE_CGROUP_SOCK:
3226         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3227         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3228         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3229         case BPF_PROG_TYPE_SOCK_OPS:
3230                 return cgroup_bpf_prog_detach(attr, ptype);
3231         default:
3232                 return -EINVAL;
3233         }
3234 }
3235
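/* BPF_PROG_QUERY: report which programs are attached at a given attach
 * point.  Cgroup, lirc and netns attach points each implement their own
 * query; any other attach type is rejected with -EINVAL.
 */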
3236 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3237
3238 static int bpf_prog_query(const union bpf_attr *attr,
3239                           union bpf_attr __user *uattr)
3240 {
3241         if (!capable(CAP_NET_ADMIN))
3242                 return -EPERM;
3243         if (CHECK_ATTR(BPF_PROG_QUERY))
3244                 return -EINVAL;
3245         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3246                 return -EINVAL;
3247
3248         switch (attr->query.attach_type) {
3249         case BPF_CGROUP_INET_INGRESS:
3250         case BPF_CGROUP_INET_EGRESS:
3251         case BPF_CGROUP_INET_SOCK_CREATE:
3252         case BPF_CGROUP_INET_SOCK_RELEASE:
3253         case BPF_CGROUP_INET4_BIND:
3254         case BPF_CGROUP_INET6_BIND:
3255         case BPF_CGROUP_INET4_POST_BIND:
3256         case BPF_CGROUP_INET6_POST_BIND:
3257         case BPF_CGROUP_INET4_CONNECT:
3258         case BPF_CGROUP_INET6_CONNECT:
3259         case BPF_CGROUP_INET4_GETPEERNAME:
3260         case BPF_CGROUP_INET6_GETPEERNAME:
3261         case BPF_CGROUP_INET4_GETSOCKNAME:
3262         case BPF_CGROUP_INET6_GETSOCKNAME:
3263         case BPF_CGROUP_UDP4_SENDMSG:
3264         case BPF_CGROUP_UDP6_SENDMSG:
3265         case BPF_CGROUP_UDP4_RECVMSG:
3266         case BPF_CGROUP_UDP6_RECVMSG:
3267         case BPF_CGROUP_SOCK_OPS:
3268         case BPF_CGROUP_DEVICE:
3269         case BPF_CGROUP_SYSCTL:
3270         case BPF_CGROUP_GETSOCKOPT:
3271         case BPF_CGROUP_SETSOCKOPT:
3272                 return cgroup_bpf_prog_query(attr, uattr);
3273         case BPF_LIRC_MODE2:
3274                 return lirc_prog_query(attr, uattr);
3275         case BPF_FLOW_DISSECTOR:
3276         case BPF_SK_LOOKUP:
3277                 return netns_bpf_prog_query(attr, uattr);
3278         default:
3279                 return -EINVAL;
3280         }
3281 }
3282
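/* BPF_PROG_TEST_RUN: run a program against caller-supplied data and/or
 * context buffers.  ctx_in/ctx_out must be paired with a non-zero size,
 * and the actual run is delegated to the program type's ->test_run()
 * callback when one exists.
 */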
3283 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3284
3285 static int bpf_prog_test_run(const union bpf_attr *attr,
3286                              union bpf_attr __user *uattr)
3287 {
3288         struct bpf_prog *prog;
3289         int ret = -ENOTSUPP;
3290
3291         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3292                 return -EINVAL;
3293
3294         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3295             (!attr->test.ctx_size_in && attr->test.ctx_in))
3296                 return -EINVAL;
3297
3298         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3299             (!attr->test.ctx_size_out && attr->test.ctx_out))
3300                 return -EINVAL;
3301
3302         prog = bpf_prog_get(attr->test.prog_fd);
3303         if (IS_ERR(prog))
3304                 return PTR_ERR(prog);
3305
3306         if (prog->aux->ops->test_run)
3307                 ret = prog->aux->ops->test_run(prog, attr, uattr);
3308
3309         bpf_prog_put(prog);
3310         return ret;
3311 }
3312
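/* Shared implementation of the BPF_*_GET_NEXT_ID commands: find the
 * smallest ID greater than attr->start_id in the given IDR and report
 * it through uattr->next_id, or -ENOENT once the ID space is exhausted.
 * Walking all program IDs from user space is, as an illustrative sketch
 * (attr zero-initialized, error handling omitted):
 *
 *	attr.start_id = 0;
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *		attr.start_id = attr.next_id;	// visit attr.next_id here
 */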
3313 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3314
3315 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3316                                union bpf_attr __user *uattr,
3317                                struct idr *idr,
3318                                spinlock_t *lock)
3319 {
3320         u32 next_id = attr->start_id;
3321         int err = 0;
3322
3323         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3324                 return -EINVAL;
3325
3326         if (!capable(CAP_SYS_ADMIN))
3327                 return -EPERM;
3328
3329         next_id++;
3330         spin_lock_bh(lock);
3331         if (!idr_get_next(idr, &next_id))
3332                 err = -ENOENT;
3333         spin_unlock_bh(lock);
3334
3335         if (!err)
3336                 err = put_user(next_id, &uattr->next_id);
3337
3338         return err;
3339 }
3340
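/* Return the live map with the smallest ID >= *id, with *id updated to
 * the ID that was found and a reference taken; maps whose refcount has
 * already dropped to zero are skipped.  This and its prog counterpart
 * below back the BPF object iterators.
 */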
3341 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3342 {
3343         struct bpf_map *map;
3344
3345         spin_lock_bh(&map_idr_lock);
3346 again:
3347         map = idr_get_next(&map_idr, id);
3348         if (map) {
3349                 map = __bpf_map_inc_not_zero(map, false);
3350                 if (IS_ERR(map)) {
3351                         (*id)++;
3352                         goto again;
3353                 }
3354         }
3355         spin_unlock_bh(&map_idr_lock);
3356
3357         return map;
3358 }
3359
3360 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3361 {
3362         struct bpf_prog *prog;
3363
3364         spin_lock_bh(&prog_idr_lock);
3365 again:
3366         prog = idr_get_next(&prog_idr, id);
3367         if (prog) {
3368                 prog = bpf_prog_inc_not_zero(prog);
3369                 if (IS_ERR(prog)) {
3370                         (*id)++;
3371                         goto again;
3372                 }
3373         }
3374         spin_unlock_bh(&prog_idr_lock);
3375
3376         return prog;
3377 }
3378
3379 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3380
3381 struct bpf_prog *bpf_prog_by_id(u32 id)
3382 {
3383         struct bpf_prog *prog;
3384
3385         if (!id)
3386                 return ERR_PTR(-ENOENT);
3387
3388         spin_lock_bh(&prog_idr_lock);
3389         prog = idr_find(&prog_idr, id);
3390         if (prog)
3391                 prog = bpf_prog_inc_not_zero(prog);
3392         else
3393                 prog = ERR_PTR(-ENOENT);
3394         spin_unlock_bh(&prog_idr_lock);
3395         return prog;
3396 }
3397
3398 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3399 {
3400         struct bpf_prog *prog;
3401         u32 id = attr->prog_id;
3402         int fd;
3403
3404         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3405                 return -EINVAL;
3406
3407         if (!capable(CAP_SYS_ADMIN))
3408                 return -EPERM;
3409
3410         prog = bpf_prog_by_id(id);
3411         if (IS_ERR(prog))
3412                 return PTR_ERR(prog);
3413
3414         fd = bpf_prog_new_fd(prog);
3415         if (fd < 0)
3416                 bpf_prog_put(prog);
3417
3418         return fd;
3419 }
3420
3421 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3422
3423 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3424 {
3425         struct bpf_map *map;
3426         u32 id = attr->map_id;
3427         int f_flags;
3428         int fd;
3429
3430         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3431             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3432                 return -EINVAL;
3433
3434         if (!capable(CAP_SYS_ADMIN))
3435                 return -EPERM;
3436
3437         f_flags = bpf_get_file_flag(attr->open_flags);
3438         if (f_flags < 0)
3439                 return f_flags;
3440
3441         spin_lock_bh(&map_idr_lock);
3442         map = idr_find(&map_idr, id);
3443         if (map)
3444                 map = __bpf_map_inc_not_zero(map, true);
3445         else
3446                 map = ERR_PTR(-ENOENT);
3447         spin_unlock_bh(&map_idr_lock);
3448
3449         if (IS_ERR(map))
3450                 return PTR_ERR(map);
3451
3452         fd = bpf_map_new_fd(map, f_flags);
3453         if (fd < 0)
3454                 bpf_map_put_with_uref(map);
3455
3456         return fd;
3457 }
3458
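/* Given the 64-bit immediate of a ld_imm64 instruction, find the map it
 * refers to: either the map pointer itself (BPF_PSEUDO_MAP_FD) or an
 * address inside the map's value area (BPF_PSEUDO_MAP_VALUE plus the
 * offset returned in @off).
 */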
3459 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3460                                               unsigned long addr, u32 *off,
3461                                               u32 *type)
3462 {
3463         const struct bpf_map *map;
3464         int i;
3465
3466         mutex_lock(&prog->aux->used_maps_mutex);
3467         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3468                 map = prog->aux->used_maps[i];
3469                 if (map == (void *)addr) {
3470                         *type = BPF_PSEUDO_MAP_FD;
3471                         goto out;
3472                 }
3473                 if (!map->ops->map_direct_value_meta)
3474                         continue;
3475                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3476                         *type = BPF_PSEUDO_MAP_VALUE;
3477                         goto out;
3478                 }
3479         }
3480         map = NULL;
3481
3482 out:
3483         mutex_unlock(&prog->aux->used_maps_mutex);
3484         return map;
3485 }
3486
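/* Make a user-visible copy of the program's instructions: rewrite
 * kernel-internal opcodes back to their UAPI forms, zero call
 * immediates when @f_cred may not see raw addresses, and turn ld_imm64
 * map addresses into map ID + offset pairs.
 */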
3487 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3488                                               const struct cred *f_cred)
3489 {
3490         const struct bpf_map *map;
3491         struct bpf_insn *insns;
3492         u32 off, type;
3493         u64 imm;
3494         u8 code;
3495         int i;
3496
3497         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3498                         GFP_USER);
3499         if (!insns)
3500                 return insns;
3501
3502         for (i = 0; i < prog->len; i++) {
3503                 code = insns[i].code;
3504
3505                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3506                         insns[i].code = BPF_JMP | BPF_CALL;
3507                         insns[i].imm = BPF_FUNC_tail_call;
3508                         /* fall-through */
3509                 }
3510                 if (code == (BPF_JMP | BPF_CALL) ||
3511                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3512                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3513                                 insns[i].code = BPF_JMP | BPF_CALL;
3514                         if (!bpf_dump_raw_ok(f_cred))
3515                                 insns[i].imm = 0;
3516                         continue;
3517                 }
3518                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3519                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3520                         continue;
3521                 }
3522
3523                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3524                         continue;
3525
3526                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3527                 map = bpf_map_from_imm(prog, imm, &off, &type);
3528                 if (map) {
3529                         insns[i].src_reg = type;
3530                         insns[i].imm = map->id;
3531                         insns[i + 1].imm = off;
3532                         continue;
3533                 }
3534         }
3535
3536         return insns;
3537 }
3538
3539 static int set_info_rec_size(struct bpf_prog_info *info)
3540 {
3541         /*
3542          * Ensure info.*_rec_size is the same as the size the kernel expects,
3543          *
3544          * or
3545          *
3546          * only allow a zero *_rec_size if both _rec_size and _cnt are
3547          * zero.  In this case, the kernel writes the expected
3548          * _rec_size back to the info.
3549          */
3550
3551         if ((info->nr_func_info || info->func_info_rec_size) &&
3552             info->func_info_rec_size != sizeof(struct bpf_func_info))
3553                 return -EINVAL;
3554
3555         if ((info->nr_line_info || info->line_info_rec_size) &&
3556             info->line_info_rec_size != sizeof(struct bpf_line_info))
3557                 return -EINVAL;
3558
3559         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3560             info->jited_line_info_rec_size != sizeof(__u64))
3561                 return -EINVAL;
3562
3563         info->func_info_rec_size = sizeof(struct bpf_func_info);
3564         info->line_info_rec_size = sizeof(struct bpf_line_info);
3565         info->jited_line_info_rec_size = sizeof(__u64);
3566
3567         return 0;
3568 }
3569
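/* BPF_OBJ_GET_INFO_BY_FD for programs: fill in a bpf_prog_info.  Fields
 * that could leak kernel addresses (xlated/JITed images, ksyms, jited
 * line info) are only emitted to sufficiently privileged callers;
 * otherwise their counts are reported as zero.
 */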
3570 static int bpf_prog_get_info_by_fd(struct file *file,
3571                                    struct bpf_prog *prog,
3572                                    const union bpf_attr *attr,
3573                                    union bpf_attr __user *uattr)
3574 {
3575         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3576         struct bpf_prog_info info;
3577         u32 info_len = attr->info.info_len;
3578         struct bpf_prog_stats stats;
3579         char __user *uinsns;
3580         u32 ulen;
3581         int err;
3582
3583         err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3584         if (err)
3585                 return err;
3586         info_len = min_t(u32, sizeof(info), info_len);
3587
3588         memset(&info, 0, sizeof(info));
3589         if (copy_from_user(&info, uinfo, info_len))
3590                 return -EFAULT;
3591
3592         info.type = prog->type;
3593         info.id = prog->aux->id;
3594         info.load_time = prog->aux->load_time;
3595         info.created_by_uid = from_kuid_munged(current_user_ns(),
3596                                                prog->aux->user->uid);
3597         info.gpl_compatible = prog->gpl_compatible;
3598
3599         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3600         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3601
3602         mutex_lock(&prog->aux->used_maps_mutex);
3603         ulen = info.nr_map_ids;
3604         info.nr_map_ids = prog->aux->used_map_cnt;
3605         ulen = min_t(u32, info.nr_map_ids, ulen);
3606         if (ulen) {
3607                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3608                 u32 i;
3609
3610                 for (i = 0; i < ulen; i++)
3611                         if (put_user(prog->aux->used_maps[i]->id,
3612                                      &user_map_ids[i])) {
3613                                 mutex_unlock(&prog->aux->used_maps_mutex);
3614                                 return -EFAULT;
3615                         }
3616         }
3617         mutex_unlock(&prog->aux->used_maps_mutex);
3618
3619         err = set_info_rec_size(&info);
3620         if (err)
3621                 return err;
3622
3623         bpf_prog_get_stats(prog, &stats);
3624         info.run_time_ns = stats.nsecs;
3625         info.run_cnt = stats.cnt;
3626         info.recursion_misses = stats.misses;
3627
3628         if (!bpf_capable()) {
3629                 info.jited_prog_len = 0;
3630                 info.xlated_prog_len = 0;
3631                 info.nr_jited_ksyms = 0;
3632                 info.nr_jited_func_lens = 0;
3633                 info.nr_func_info = 0;
3634                 info.nr_line_info = 0;
3635                 info.nr_jited_line_info = 0;
3636                 goto done;
3637         }
3638
3639         ulen = info.xlated_prog_len;
3640         info.xlated_prog_len = bpf_prog_insn_size(prog);
3641         if (info.xlated_prog_len && ulen) {
3642                 struct bpf_insn *insns_sanitized;
3643                 bool fault;
3644
3645                 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3646                         info.xlated_prog_insns = 0;
3647                         goto done;
3648                 }
3649                 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3650                 if (!insns_sanitized)
3651                         return -ENOMEM;
3652                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3653                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3654                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3655                 kfree(insns_sanitized);
3656                 if (fault)
3657                         return -EFAULT;
3658         }
3659
3660         if (bpf_prog_is_dev_bound(prog->aux)) {
3661                 err = bpf_prog_offload_info_fill(&info, prog);
3662                 if (err)
3663                         return err;
3664                 goto done;
3665         }
3666
3667         /* NOTE: the following code is supposed to be skipped for offload.
3668          * bpf_prog_offload_info_fill() is the place to fill similar fields
3669          * for offload.
3670          */
3671         ulen = info.jited_prog_len;
3672         if (prog->aux->func_cnt) {
3673                 u32 i;
3674
3675                 info.jited_prog_len = 0;
3676                 for (i = 0; i < prog->aux->func_cnt; i++)
3677                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3678         } else {
3679                 info.jited_prog_len = prog->jited_len;
3680         }
3681
3682         if (info.jited_prog_len && ulen) {
3683                 if (bpf_dump_raw_ok(file->f_cred)) {
3684                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3685                         ulen = min_t(u32, info.jited_prog_len, ulen);
3686
3687                         /* for multi-function programs, copy the JITed
3688                          * instructions for all the functions
3689                          */
3690                         if (prog->aux->func_cnt) {
3691                                 u32 len, free, i;
3692                                 u8 *img;
3693
3694                                 free = ulen;
3695                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3696                                         len = prog->aux->func[i]->jited_len;
3697                                         len = min_t(u32, len, free);
3698                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3699                                         if (copy_to_user(uinsns, img, len))
3700                                                 return -EFAULT;
3701                                         uinsns += len;
3702                                         free -= len;
3703                                         if (!free)
3704                                                 break;
3705                                 }
3706                         } else {
3707                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3708                                         return -EFAULT;
3709                         }
3710                 } else {
3711                         info.jited_prog_insns = 0;
3712                 }
3713         }
3714
3715         ulen = info.nr_jited_ksyms;
3716         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3717         if (ulen) {
3718                 if (bpf_dump_raw_ok(file->f_cred)) {
3719                         unsigned long ksym_addr;
3720                         u64 __user *user_ksyms;
3721                         u32 i;
3722
3723                         /* copy the address of the kernel symbol
3724                          * corresponding to each function
3725                          */
3726                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3727                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3728                         if (prog->aux->func_cnt) {
3729                                 for (i = 0; i < ulen; i++) {
3730                                         ksym_addr = (unsigned long)
3731                                                 prog->aux->func[i]->bpf_func;
3732                                         if (put_user((u64) ksym_addr,
3733                                                      &user_ksyms[i]))
3734                                                 return -EFAULT;
3735                                 }
3736                         } else {
3737                                 ksym_addr = (unsigned long) prog->bpf_func;
3738                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3739                                         return -EFAULT;
3740                         }
3741                 } else {
3742                         info.jited_ksyms = 0;
3743                 }
3744         }
3745
3746         ulen = info.nr_jited_func_lens;
3747         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3748         if (ulen) {
3749                 if (bpf_dump_raw_ok(file->f_cred)) {
3750                         u32 __user *user_lens;
3751                         u32 func_len, i;
3752
3753                         /* copy the JITed image lengths for each function */
3754                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3755                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3756                         if (prog->aux->func_cnt) {
3757                                 for (i = 0; i < ulen; i++) {
3758                                         func_len =
3759                                                 prog->aux->func[i]->jited_len;
3760                                         if (put_user(func_len, &user_lens[i]))
3761                                                 return -EFAULT;
3762                                 }
3763                         } else {
3764                                 func_len = prog->jited_len;
3765                                 if (put_user(func_len, &user_lens[0]))
3766                                         return -EFAULT;
3767                         }
3768                 } else {
3769                         info.jited_func_lens = 0;
3770                 }
3771         }
3772
3773         if (prog->aux->btf)
3774                 info.btf_id = btf_obj_id(prog->aux->btf);
3775
3776         ulen = info.nr_func_info;
3777         info.nr_func_info = prog->aux->func_info_cnt;
3778         if (info.nr_func_info && ulen) {
3779                 char __user *user_finfo;
3780
3781                 user_finfo = u64_to_user_ptr(info.func_info);
3782                 ulen = min_t(u32, info.nr_func_info, ulen);
3783                 if (copy_to_user(user_finfo, prog->aux->func_info,
3784                                  info.func_info_rec_size * ulen))
3785                         return -EFAULT;
3786         }
3787
3788         ulen = info.nr_line_info;
3789         info.nr_line_info = prog->aux->nr_linfo;
3790         if (info.nr_line_info && ulen) {
3791                 __u8 __user *user_linfo;
3792
3793                 user_linfo = u64_to_user_ptr(info.line_info);
3794                 ulen = min_t(u32, info.nr_line_info, ulen);
3795                 if (copy_to_user(user_linfo, prog->aux->linfo,
3796                                  info.line_info_rec_size * ulen))
3797                         return -EFAULT;
3798         }
3799
3800         ulen = info.nr_jited_line_info;
3801         if (prog->aux->jited_linfo)
3802                 info.nr_jited_line_info = prog->aux->nr_linfo;
3803         else
3804                 info.nr_jited_line_info = 0;
3805         if (info.nr_jited_line_info && ulen) {
3806                 if (bpf_dump_raw_ok(file->f_cred)) {
3807                         __u64 __user *user_linfo;
3808                         u32 i;
3809
3810                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3811                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3812                         for (i = 0; i < ulen; i++) {
3813                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3814                                              &user_linfo[i]))
3815                                         return -EFAULT;
3816                         }
3817                 } else {
3818                         info.jited_line_info = 0;
3819                 }
3820         }
3821
3822         ulen = info.nr_prog_tags;
3823         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3824         if (ulen) {
3825                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3826                 u32 i;
3827
3828                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3829                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3830                 if (prog->aux->func_cnt) {
3831                         for (i = 0; i < ulen; i++) {
3832                                 if (copy_to_user(user_prog_tags[i],
3833                                                  prog->aux->func[i]->tag,
3834                                                  BPF_TAG_SIZE))
3835                                         return -EFAULT;
3836                         }
3837                 } else {
3838                         if (copy_to_user(user_prog_tags[0],
3839                                          prog->tag, BPF_TAG_SIZE))
3840                                 return -EFAULT;
3841                 }
3842         }
3843
3844 done:
3845         if (copy_to_user(uinfo, &info, info_len) ||
3846             put_user(info_len, &uattr->info.info_len))
3847                 return -EFAULT;
3848
3849         return 0;
3850 }
3851
3852 static int bpf_map_get_info_by_fd(struct file *file,
3853                                   struct bpf_map *map,
3854                                   const union bpf_attr *attr,
3855                                   union bpf_attr __user *uattr)
3856 {
3857         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3858         struct bpf_map_info info;
3859         u32 info_len = attr->info.info_len;
3860         int err;
3861
3862         err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3863         if (err)
3864                 return err;
3865         info_len = min_t(u32, sizeof(info), info_len);
3866
3867         memset(&info, 0, sizeof(info));
3868         info.type = map->map_type;
3869         info.id = map->id;
3870         info.key_size = map->key_size;
3871         info.value_size = map->value_size;
3872         info.max_entries = map->max_entries;
3873         info.map_flags = map->map_flags;
3874         memcpy(info.name, map->name, sizeof(map->name));
3875
3876         if (map->btf) {
3877                 info.btf_id = btf_obj_id(map->btf);
3878                 info.btf_key_type_id = map->btf_key_type_id;
3879                 info.btf_value_type_id = map->btf_value_type_id;
3880         }
3881         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3882
3883         if (bpf_map_is_dev_bound(map)) {
3884                 err = bpf_map_offload_info_fill(&info, map);
3885                 if (err)
3886                         return err;
3887         }
3888
3889         if (copy_to_user(uinfo, &info, info_len) ||
3890             put_user(info_len, &uattr->info.info_len))
3891                 return -EFAULT;
3892
3893         return 0;
3894 }
3895
3896 static int bpf_btf_get_info_by_fd(struct file *file,
3897                                   struct btf *btf,
3898                                   const union bpf_attr *attr,
3899                                   union bpf_attr __user *uattr)
3900 {
3901         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3902         u32 info_len = attr->info.info_len;
3903         int err;
3904
3905         err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
3906         if (err)
3907                 return err;
3908
3909         return btf_get_info_by_fd(btf, attr, uattr);
3910 }
3911
3912 static int bpf_link_get_info_by_fd(struct file *file,
3913                                    struct bpf_link *link,
3914                                    const union bpf_attr *attr,
3915                                    union bpf_attr __user *uattr)
3916 {
3917         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3918         struct bpf_link_info info;
3919         u32 info_len = attr->info.info_len;
3920         int err;
3921
3922         err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3923         if (err)
3924                 return err;
3925         info_len = min_t(u32, sizeof(info), info_len);
3926
3927         memset(&info, 0, sizeof(info));
3928         if (copy_from_user(&info, uinfo, info_len))
3929                 return -EFAULT;
3930
3931         info.type = link->type;
3932         info.id = link->id;
3933         info.prog_id = link->prog->aux->id;
3934
3935         if (link->ops->fill_link_info) {
3936                 err = link->ops->fill_link_info(link, &info);
3937                 if (err)
3938                         return err;
3939         }
3940
3941         if (copy_to_user(uinfo, &info, info_len) ||
3942             put_user(info_len, &uattr->info.info_len))
3943                 return -EFAULT;
3944
3945         return 0;
3946 }
3947
3949 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3950
3951 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3952                                   union bpf_attr __user *uattr)
3953 {
3954         int ufd = attr->info.bpf_fd;
3955         struct fd f;
3956         int err;
3957
3958         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3959                 return -EINVAL;
3960
3961         f = fdget(ufd);
3962         if (!f.file)
3963                 return -EBADFD;
3964
3965         if (f.file->f_op == &bpf_prog_fops)
3966                 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3967                                               uattr);
3968         else if (f.file->f_op == &bpf_map_fops)
3969                 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3970                                              uattr);
3971         else if (f.file->f_op == &btf_fops)
3972                 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3973         else if (f.file->f_op == &bpf_link_fops)
3974                 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3975                                               attr, uattr);
3976         else
3977                 err = -EINVAL;
3978
3979         fdput(f);
3980         return err;
3981 }
3982
3983 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3984
3985 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
3986 {
3987         if (CHECK_ATTR(BPF_BTF_LOAD))
3988                 return -EINVAL;
3989
3990         if (!bpf_capable())
3991                 return -EPERM;
3992
3993         return btf_new_fd(attr, uattr);
3994 }
3995
3996 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3997
3998 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3999 {
4000         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4001                 return -EINVAL;
4002
4003         if (!capable(CAP_SYS_ADMIN))
4004                 return -EPERM;
4005
4006         return btf_get_fd_by_id(attr->btf_id);
4007 }
4008
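/* Copy the result of BPF_TASK_FD_QUERY back to user space.  The name in
 * @buf is truncated to fit the user buffer but always NUL-terminated; a
 * truncated copy is reported with -ENOSPC, while the full length goes
 * to uattr->task_fd_query.buf_len.
 */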
4009 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4010                                   union bpf_attr __user *uattr,
4011                                   u32 prog_id, u32 fd_type,
4012                                   const char *buf, u64 probe_offset,
4013                                   u64 probe_addr)
4014 {
4015         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4016         u32 len = buf ? strlen(buf) : 0, input_len;
4017         int err = 0;
4018
4019         if (put_user(len, &uattr->task_fd_query.buf_len))
4020                 return -EFAULT;
4021         input_len = attr->task_fd_query.buf_len;
4022         if (input_len && ubuf) {
4023                 if (!len) {
4024                         /* nothing to copy, just NUL-terminate ubuf */
4025                         char zero = '\0';
4026
4027                         if (put_user(zero, ubuf))
4028                                 return -EFAULT;
4029                 } else if (input_len >= len + 1) {
4030                         /* ubuf can hold the string and its NUL terminator */
4031                         if (copy_to_user(ubuf, buf, len + 1))
4032                                 return -EFAULT;
4033                 } else {
4034                         /* ubuf cannot hold the whole string, so do a
4035                          * partial copy and NUL-terminate it.
4036                          */
4037                         char zero = '\0';
4038
4039                         err = -ENOSPC;
4040                         if (copy_to_user(ubuf, buf, input_len - 1))
4041                                 return -EFAULT;
4042                         if (put_user(zero, ubuf + input_len - 1))
4043                                 return -EFAULT;
4044                 }
4045         }
4046
4047         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4048             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4049             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4050             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4051                 return -EFAULT;
4052
4053         return err;
4054 }
4055
4056 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4057
4058 static int bpf_task_fd_query(const union bpf_attr *attr,
4059                              union bpf_attr __user *uattr)
4060 {
4061         pid_t pid = attr->task_fd_query.pid;
4062         u32 fd = attr->task_fd_query.fd;
4063         const struct perf_event *event;
4064         struct task_struct *task;
4065         struct file *file;
4066         int err;
4067
4068         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4069                 return -EINVAL;
4070
4071         if (!capable(CAP_SYS_ADMIN))
4072                 return -EPERM;
4073
4074         if (attr->task_fd_query.flags != 0)
4075                 return -EINVAL;
4076
4077         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4078         if (!task)
4079                 return -ENOENT;
4080
4081         err = 0;
4082         file = fget_task(task, fd);
4083         put_task_struct(task);
4084         if (!file)
4085                 return -EBADF;
4086
4087         if (file->f_op == &bpf_link_fops) {
4088                 struct bpf_link *link = file->private_data;
4089
4090                 if (link->ops == &bpf_raw_tp_link_lops) {
4091                         struct bpf_raw_tp_link *raw_tp =
4092                                 container_of(link, struct bpf_raw_tp_link, link);
4093                         struct bpf_raw_event_map *btp = raw_tp->btp;
4094
4095                         err = bpf_task_fd_query_copy(attr, uattr,
4096                                                      raw_tp->link.prog->aux->id,
4097                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
4098                                                      btp->tp->name, 0, 0);
4099                         goto put_file;
4100                 }
4101                 goto out_not_supp;
4102         }
4103
4104         event = perf_get_event(file);
4105         if (!IS_ERR(event)) {
4106                 u64 probe_offset, probe_addr;
4107                 u32 prog_id, fd_type;
4108                 const char *buf;
4109
4110                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4111                                               &buf, &probe_offset,
4112                                               &probe_addr);
4113                 if (!err)
4114                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4115                                                      fd_type, buf,
4116                                                      probe_offset,
4117                                                      probe_addr);
4118                 goto put_file;
4119         }
4120
4121 out_not_supp:
4122         err = -ENOTSUPP;
4123 put_file:
4124         fput(file);
4125         return err;
4126 }
4127
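/* BPF_MAP_*_BATCH: after checking the fd's read/write permission on the
 * map, dispatch to the map's batch callback; maps that do not implement
 * the requested batch operation get -ENOTSUPP.
 */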
4128 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
4129
4130 #define BPF_DO_BATCH(fn)                        \
4131         do {                                    \
4132                 if (!fn) {                      \
4133                         err = -ENOTSUPP;        \
4134                         goto err_put;           \
4135                 }                               \
4136                 err = fn(map, attr, uattr);     \
4137         } while (0)
4138
4139 static int bpf_map_do_batch(const union bpf_attr *attr,
4140                             union bpf_attr __user *uattr,
4141                             int cmd)
4142 {
4143         struct bpf_map *map;
4144         int err, ufd;
4145         struct fd f;
4146
4147         if (CHECK_ATTR(BPF_MAP_BATCH))
4148                 return -EINVAL;
4149
4150         ufd = attr->batch.map_fd;
4151         f = fdget(ufd);
4152         map = __bpf_map_get(f);
4153         if (IS_ERR(map))
4154                 return PTR_ERR(map);
4155
4156         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
4157              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
4158             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4159                 err = -EPERM;
4160                 goto err_put;
4161         }
4162
4163         if (cmd != BPF_MAP_LOOKUP_BATCH &&
4164             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4165                 err = -EPERM;
4166                 goto err_put;
4167         }
4168
4169         if (cmd == BPF_MAP_LOOKUP_BATCH)
4170                 BPF_DO_BATCH(map->ops->map_lookup_batch);
4171         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4172                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4173         else if (cmd == BPF_MAP_UPDATE_BATCH)
4174                 BPF_DO_BATCH(map->ops->map_update_batch);
4175         else
4176                 BPF_DO_BATCH(map->ops->map_delete_batch);
4177
4178 err_put:
4179         fdput(f);
4180         return err;
4181 }
4182
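/* Create links for tracing-flavoured programs: BPF_TRACE_ITER programs
 * become iterator links, while freplace (BPF_PROG_TYPE_EXT) programs
 * attach to the given target fd/BTF ID.
 */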
4183 static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
4184                                    struct bpf_prog *prog)
4185 {
4186         if (attr->link_create.attach_type != prog->expected_attach_type)
4187                 return -EINVAL;
4188
4189         if (prog->expected_attach_type == BPF_TRACE_ITER)
4190                 return bpf_iter_link_attach(attr, uattr, prog);
4191         else if (prog->type == BPF_PROG_TYPE_EXT)
4192                 return bpf_tracing_prog_attach(prog,
4193                                                attr->link_create.target_fd,
4194                                                attr->link_create.target_btf_id);
4195         return -EINVAL;
4196 }
4197
4198 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
4199 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4200 {
4201         enum bpf_prog_type ptype;
4202         struct bpf_prog *prog;
4203         int ret;
4204
4205         if (CHECK_ATTR(BPF_LINK_CREATE))
4206                 return -EINVAL;
4207
4208         prog = bpf_prog_get(attr->link_create.prog_fd);
4209         if (IS_ERR(prog))
4210                 return PTR_ERR(prog);
4211
4212         ret = bpf_prog_attach_check_attach_type(prog,
4213                                                 attr->link_create.attach_type);
4214         if (ret)
4215                 goto out;
4216
4217         switch (prog->type) {
4218         case BPF_PROG_TYPE_EXT:
4219                 ret = tracing_bpf_link_attach(attr, uattr, prog);
4220                 goto out;
4221         case BPF_PROG_TYPE_PERF_EVENT:
4222         case BPF_PROG_TYPE_KPROBE:
4223         case BPF_PROG_TYPE_TRACEPOINT:
4224                 if (attr->link_create.attach_type != BPF_PERF_EVENT) {
4225                         ret = -EINVAL;
4226                         goto out;
4227                 }
4228                 ptype = prog->type;
4229                 break;
4230         default:
4231                 ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4232                 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4233                         ret = -EINVAL;
4234                         goto out;
4235                 }
4236                 break;
4237         }
4238
4239         switch (ptype) {
4240         case BPF_PROG_TYPE_CGROUP_SKB:
4241         case BPF_PROG_TYPE_CGROUP_SOCK:
4242         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4243         case BPF_PROG_TYPE_SOCK_OPS:
4244         case BPF_PROG_TYPE_CGROUP_DEVICE:
4245         case BPF_PROG_TYPE_CGROUP_SYSCTL:
4246         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4247                 ret = cgroup_bpf_link_attach(attr, prog);
4248                 break;
4249         case BPF_PROG_TYPE_TRACING:
4250                 ret = tracing_bpf_link_attach(attr, uattr, prog);
4251                 break;
4252         case BPF_PROG_TYPE_FLOW_DISSECTOR:
4253         case BPF_PROG_TYPE_SK_LOOKUP:
4254                 ret = netns_bpf_link_create(attr, prog);
4255                 break;
4256 #ifdef CONFIG_NET
4257         case BPF_PROG_TYPE_XDP:
4258                 ret = bpf_xdp_link_attach(attr, prog);
4259                 break;
4260 #endif
4261 #ifdef CONFIG_PERF_EVENTS
4262         case BPF_PROG_TYPE_PERF_EVENT:
4263         case BPF_PROG_TYPE_TRACEPOINT:
4264         case BPF_PROG_TYPE_KPROBE:
4265                 ret = bpf_perf_link_attach(attr, prog);
4266                 break;
4267 #endif
4268         default:
4269                 ret = -EINVAL;
4270         }
4271
4272 out:
4273         if (ret < 0)
4274                 bpf_prog_put(prog);
4275         return ret;
4276 }
4277
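/* BPF_LINK_UPDATE: atomically swap the program behind an existing link.
 * With BPF_F_REPLACE the caller also names the program it expects to
 * replace, turning the update into a compare-and-exchange.
 */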
4278 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4279
4280 static int link_update(union bpf_attr *attr)
4281 {
4282         struct bpf_prog *old_prog = NULL, *new_prog;
4283         struct bpf_link *link;
4284         u32 flags;
4285         int ret;
4286
4287         if (CHECK_ATTR(BPF_LINK_UPDATE))
4288                 return -EINVAL;
4289
4290         flags = attr->link_update.flags;
4291         if (flags & ~BPF_F_REPLACE)
4292                 return -EINVAL;
4293
4294         link = bpf_link_get_from_fd(attr->link_update.link_fd);
4295         if (IS_ERR(link))
4296                 return PTR_ERR(link);
4297
4298         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4299         if (IS_ERR(new_prog)) {
4300                 ret = PTR_ERR(new_prog);
4301                 goto out_put_link;
4302         }
4303
4304         if (flags & BPF_F_REPLACE) {
4305                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4306                 if (IS_ERR(old_prog)) {
4307                         ret = PTR_ERR(old_prog);
4308                         old_prog = NULL;
4309                         goto out_put_progs;
4310                 }
4311         } else if (attr->link_update.old_prog_fd) {
4312                 ret = -EINVAL;
4313                 goto out_put_progs;
4314         }
4315
4316         if (link->ops->update_prog)
4317                 ret = link->ops->update_prog(link, new_prog, old_prog);
4318         else
4319                 ret = -EINVAL;
4320
4321 out_put_progs:
4322         if (old_prog)
4323                 bpf_prog_put(old_prog);
4324         if (ret)
4325                 bpf_prog_put(new_prog);
4326 out_put_link:
4327         bpf_link_put(link);
4328         return ret;
4329 }
4330
4331 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4332
4333 static int link_detach(union bpf_attr *attr)
4334 {
4335         struct bpf_link *link;
4336         int ret;
4337
4338         if (CHECK_ATTR(BPF_LINK_DETACH))
4339                 return -EINVAL;
4340
4341         link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4342         if (IS_ERR(link))
4343                 return PTR_ERR(link);
4344
4345         if (link->ops->detach)
4346                 ret = link->ops->detach(link);
4347         else
4348                 ret = -EOPNOTSUPP;
4349
4350         bpf_link_put(link);
4351         return ret;
4352 }
4353
4354 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4355 {
4356         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4357 }
4358
4359 struct bpf_link *bpf_link_by_id(u32 id)
4360 {
4361         struct bpf_link *link;
4362
4363         if (!id)
4364                 return ERR_PTR(-ENOENT);
4365
4366         spin_lock_bh(&link_idr_lock);
4367         /* before link is "settled", ID is 0, pretend it doesn't exist yet */
4368         link = idr_find(&link_idr, id);
4369         if (link) {
4370                 if (link->id)
4371                         link = bpf_link_inc_not_zero(link);
4372                 else
4373                         link = ERR_PTR(-EAGAIN);
4374         } else {
4375                 link = ERR_PTR(-ENOENT);
4376         }
4377         spin_unlock_bh(&link_idr_lock);
4378         return link;
4379 }
4380
4381 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4382
4383 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4384 {
4385         struct bpf_link *link;
4386         u32 id = attr->link_id;
4387         int fd;
4388
4389         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4390                 return -EINVAL;
4391
4392         if (!capable(CAP_SYS_ADMIN))
4393                 return -EPERM;
4394
4395         link = bpf_link_by_id(id);
4396         if (IS_ERR(link))
4397                 return PTR_ERR(link);
4398
4399         fd = bpf_link_new_fd(link);
4400         if (fd < 0)
4401                 bpf_link_put(link);
4402
4403         return fd;
4404 }
4405
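/* BPF_ENABLE_STATS(BPF_STATS_RUN_TIME) hands out an fd; run-time stats
 * collection stays enabled until the last such fd is released.  The
 * static key is guarded by bpf_stats_enabled_mutex.
 */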
4406 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4407
4408 static int bpf_stats_release(struct inode *inode, struct file *file)
4409 {
4410         mutex_lock(&bpf_stats_enabled_mutex);
4411         static_key_slow_dec(&bpf_stats_enabled_key.key);
4412         mutex_unlock(&bpf_stats_enabled_mutex);
4413         return 0;
4414 }
4415
4416 static const struct file_operations bpf_stats_fops = {
4417         .release = bpf_stats_release,
4418 };
4419
4420 static int bpf_enable_runtime_stats(void)
4421 {
4422         int fd;
4423
4424         mutex_lock(&bpf_stats_enabled_mutex);
4425
4426         /* Set a very high limit to avoid overflow */
4427         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4428                 mutex_unlock(&bpf_stats_enabled_mutex);
4429                 return -EBUSY;
4430         }
4431
4432         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4433         if (fd >= 0)
4434                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4435
4436         mutex_unlock(&bpf_stats_enabled_mutex);
4437         return fd;
4438 }
4439
4440 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4441
4442 static int bpf_enable_stats(union bpf_attr *attr)
4443 {
4445         if (CHECK_ATTR(BPF_ENABLE_STATS))
4446                 return -EINVAL;
4447
4448         if (!capable(CAP_SYS_ADMIN))
4449                 return -EPERM;
4450
4451         switch (attr->enable_stats.type) {
4452         case BPF_STATS_RUN_TIME:
4453                 return bpf_enable_runtime_stats();
4454         default:
4455                 break;
4456         }
4457         return -EINVAL;
4458 }
4459
4460 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4461
4462 static int bpf_iter_create(union bpf_attr *attr)
4463 {
4464         struct bpf_link *link;
4465         int err;
4466
4467         if (CHECK_ATTR(BPF_ITER_CREATE))
4468                 return -EINVAL;
4469
4470         if (attr->iter_create.flags)
4471                 return -EINVAL;
4472
4473         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4474         if (IS_ERR(link))
4475                 return PTR_ERR(link);
4476
4477         err = bpf_iter_new_fd(link);
4478         bpf_link_put(link);
4479
4480         return err;
4481 }
4482
4483 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4484
4485 static int bpf_prog_bind_map(union bpf_attr *attr)
4486 {
4487         struct bpf_prog *prog;
4488         struct bpf_map *map;
4489         struct bpf_map **used_maps_old, **used_maps_new;
4490         int i, ret = 0;
4491
4492         if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4493                 return -EINVAL;
4494
4495         if (attr->prog_bind_map.flags)
4496                 return -EINVAL;
4497
4498         prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4499         if (IS_ERR(prog))
4500                 return PTR_ERR(prog);
4501
4502         map = bpf_map_get(attr->prog_bind_map.map_fd);
4503         if (IS_ERR(map)) {
4504                 ret = PTR_ERR(map);
4505                 goto out_prog_put;
4506         }
4507
4508         mutex_lock(&prog->aux->used_maps_mutex);
4509
4510         used_maps_old = prog->aux->used_maps;
4511
4512         for (i = 0; i < prog->aux->used_map_cnt; i++)
4513                 if (used_maps_old[i] == map) {
4514                         bpf_map_put(map);
4515                         goto out_unlock;
4516                 }
4517
4518         used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4519                                       sizeof(used_maps_new[0]),
4520                                       GFP_KERNEL);
4521         if (!used_maps_new) {
4522                 ret = -ENOMEM;
4523                 goto out_unlock;
4524         }
4525
4526         memcpy(used_maps_new, used_maps_old,
4527                sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4528         used_maps_new[prog->aux->used_map_cnt] = map;
4529
4530         prog->aux->used_map_cnt++;
4531         prog->aux->used_maps = used_maps_new;
4532
4533         kfree(used_maps_old);
4534
4535 out_unlock:
4536         mutex_unlock(&prog->aux->used_maps_mutex);
4537
4538         if (ret)
4539                 bpf_map_put(map);
4540 out_prog_put:
4541         bpf_prog_put(prog);
4542         return ret;
4543 }
4544
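/* Top-level dispatcher shared by the bpf(2) syscall and the bpf_sys_bpf()
 * helper below: enforce the unprivileged-BPF sysctl, validate and copy in
 * a possibly smaller (or zero-tail-padded larger) bpf_attr, give LSMs a
 * veto via security_bpf(), then branch on the command.
 */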
4545 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
4546 {
4547         union bpf_attr attr;
4548         int err;
4549
4550         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4551                 return -EPERM;
4552
4553         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4554         if (err)
4555                 return err;
4556         size = min_t(u32, size, sizeof(attr));
4557
4558         /* copy attributes from user space; may be less than sizeof(bpf_attr) */
4559         memset(&attr, 0, sizeof(attr));
4560         if (copy_from_bpfptr(&attr, uattr, size) != 0)
4561                 return -EFAULT;
4562
4563         err = security_bpf(cmd, &attr, size);
4564         if (err < 0)
4565                 return err;
4566
        switch (cmd) {
        case BPF_MAP_CREATE:
                err = map_create(&attr);
                break;
        case BPF_MAP_LOOKUP_ELEM:
                err = map_lookup_elem(&attr);
                break;
        case BPF_MAP_UPDATE_ELEM:
                err = map_update_elem(&attr, uattr);
                break;
        case BPF_MAP_DELETE_ELEM:
                err = map_delete_elem(&attr);
                break;
        case BPF_MAP_GET_NEXT_KEY:
                err = map_get_next_key(&attr);
                break;
        case BPF_MAP_FREEZE:
                err = map_freeze(&attr);
                break;
        case BPF_PROG_LOAD:
                err = bpf_prog_load(&attr, uattr);
                break;
        case BPF_OBJ_PIN:
                err = bpf_obj_pin(&attr);
                break;
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
        case BPF_PROG_DETACH:
                err = bpf_prog_detach(&attr);
                break;
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr.user);
                break;
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr.user);
                break;
        case BPF_PROG_GET_NEXT_ID:
                err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &prog_idr, &prog_idr_lock);
                break;
        case BPF_MAP_GET_NEXT_ID:
                err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &map_idr, &map_idr_lock);
                break;
        case BPF_BTF_GET_NEXT_ID:
                err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &btf_idr, &btf_idr_lock);
                break;
        case BPF_PROG_GET_FD_BY_ID:
                err = bpf_prog_get_fd_by_id(&attr);
                break;
        case BPF_MAP_GET_FD_BY_ID:
                err = bpf_map_get_fd_by_id(&attr);
                break;
        case BPF_OBJ_GET_INFO_BY_FD:
                err = bpf_obj_get_info_by_fd(&attr, uattr.user);
                break;
        case BPF_RAW_TRACEPOINT_OPEN:
                err = bpf_raw_tracepoint_open(&attr);
                break;
        case BPF_BTF_LOAD:
                err = bpf_btf_load(&attr, uattr);
                break;
        case BPF_BTF_GET_FD_BY_ID:
                err = bpf_btf_get_fd_by_id(&attr);
                break;
        case BPF_TASK_FD_QUERY:
                err = bpf_task_fd_query(&attr, uattr.user);
                break;
        case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
                err = map_lookup_and_delete_elem(&attr);
                break;
        case BPF_MAP_LOOKUP_BATCH:
                err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
                break;
        case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
                err = bpf_map_do_batch(&attr, uattr.user,
                                       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
                break;
        case BPF_MAP_UPDATE_BATCH:
                err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
                break;
        case BPF_MAP_DELETE_BATCH:
                err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
                break;
        case BPF_LINK_CREATE:
                err = link_create(&attr, uattr);
                break;
        case BPF_LINK_UPDATE:
                err = link_update(&attr);
                break;
        case BPF_LINK_GET_FD_BY_ID:
                err = bpf_link_get_fd_by_id(&attr);
                break;
        case BPF_LINK_GET_NEXT_ID:
                err = bpf_obj_get_next_id(&attr, uattr.user,
                                          &link_idr, &link_idr_lock);
                break;
        case BPF_ENABLE_STATS:
                err = bpf_enable_stats(&attr);
                break;
        case BPF_ITER_CREATE:
                err = bpf_iter_create(&attr);
                break;
        case BPF_LINK_DETACH:
                err = link_detach(&attr);
                break;
        case BPF_PROG_BIND_MAP:
                err = bpf_prog_bind_map(&attr);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

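/* A minimal user-space sketch (not part of this file): the raw syscall is
 * typically reached through the syscall(2) wrapper, e.g.
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_ARRAY,
 *              .key_size    = 4,
 *              .value_size  = 8,
 *              .max_entries = 1,
 *      };
 *      int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */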
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
        return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}

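/* Context accesses by BPF_PROG_TYPE_SYSCALL programs: any access that is
 * aligned to its own size and lands at an offset below U16_MAX is allowed.
 */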
static bool syscall_prog_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
                                         const struct bpf_prog *prog,
                                         struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= U16_MAX)
                return false;
        if (off % size != 0)
                return false;
        return true;
}

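/* bpf_sys_bpf(): lets a syscall program re-enter __sys_bpf() with a
 * kernel-side attr. Only a small allowlist of commands is accepted.
 */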
BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
{
        switch (cmd) {
        case BPF_MAP_CREATE:
        case BPF_MAP_UPDATE_ELEM:
        case BPF_MAP_FREEZE:
        case BPF_PROG_LOAD:
        case BPF_BTF_LOAD:
                break;
        /* BPF_PROG_TEST_RUN is deliberately absent from this list to
         * prevent a recursive test_run.
         */
        default:
                return -EINVAL;
        }
        return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}

static const struct bpf_func_proto bpf_sys_bpf_proto = {
        .func           = bpf_sys_bpf,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return bpf_base_func_proto(func_id);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
        /* When a BPF program calls this helper, there must not be an
         * fdget() without a matching, completed fdput().
         * This helper is allowed in the following call chain only:
         * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
         */
        return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
        .func           = bpf_sys_close,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
};

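/* Helpers available to BPF_PROG_TYPE_SYSCALL programs; anything not
 * listed here falls back to the tracing helper set.
 */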
static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_sys_bpf:
                return &bpf_sys_bpf_proto;
        case BPF_FUNC_btf_find_by_name_kind:
                return &bpf_btf_find_by_name_kind_proto;
        case BPF_FUNC_sys_close:
                return &bpf_sys_close_proto;
        default:
                return tracing_prog_func_proto(func_id, prog);
        }
}

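/* A minimal sketch of such a program (assumes libbpf conventions, e.g.
 * the SEC("syscall") section name; the ctx layout is chosen by the user),
 * mirroring the user-space example above but driven via bpf_sys_bpf():
 *
 *      SEC("syscall")
 *      int create_map(void *ctx)
 *      {
 *              union bpf_attr attr = {
 *                      .map_type    = BPF_MAP_TYPE_ARRAY,
 *                      .key_size    = 4,
 *                      .value_size  = 8,
 *                      .max_entries = 1,
 *              };
 *
 *              return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *      }
 */
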
const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
        .get_func_proto  = syscall_prog_func_proto,
        .is_valid_access = syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
        .test_run = bpf_prog_test_run_syscall,
};