kernel/bpf/syscall.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
                        IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
        IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
        [_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
                             size_t expected_size,
                             size_t actual_size)
{
        unsigned char __user *addr = uaddr + expected_size;
        int res;

        if (unlikely(actual_size > PAGE_SIZE))  /* silly large */
                return -E2BIG;

        if (actual_size <= expected_size)
                return 0;

        res = check_zeroed_user(addr, actual_size - expected_size);
        if (res < 0)
                return res;
        return res ? 0 : -E2BIG;
}
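
/*
 * Illustrative user-space sketch (not part of this file): a binary built
 * against a newer UAPI header may hand the kernel a larger union bpf_attr
 * than it knows about. The call only succeeds if the unknown tail is
 * zeroed, which the memset() below guarantees:
 *
 *      union bpf_attr attr;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *      attr.key_size    = 4;
 *      attr.value_size  = 8;
 *      attr.max_entries = 1;
 *      fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */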

const struct bpf_map_ops bpf_map_offload_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = bpf_map_offload_map_alloc,
        .map_free = bpf_map_offload_map_free,
        .map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops;
        u32 type = attr->map_type;
        struct bpf_map *map;
        int err;

        if (type >= ARRAY_SIZE(bpf_map_types))
                return ERR_PTR(-EINVAL);
        type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
        ops = bpf_map_types[type];
        if (!ops)
                return ERR_PTR(-EINVAL);

        if (ops->map_alloc_check) {
                err = ops->map_alloc_check(attr);
                if (err)
                        return ERR_PTR(err);
        }
        if (attr->map_ifindex)
                ops = &bpf_map_offload_ops;
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return map;
        map->ops = ops;
        map->map_type = type;
        return map;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
            map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return round_up(map->value_size, 8) * num_possible_cpus();
        else if (IS_FD_MAP(map))
                return sizeof(u32);
        else
                return map->value_size;
}
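
/*
 * User-space consequence (illustrative sketch): for the per-cpu map types
 * above, a syscall-side lookup must supply one 8-byte-aligned value slot
 * per possible CPU. Assuming libbpf's libbpf_num_possible_cpus() and
 * bpf_map_lookup_elem() wrappers:
 *
 *      int ncpus = libbpf_num_possible_cpus();
 *      __u64 *vals = calloc(ncpus, sizeof(__u64));
 *      __u32 key = 0, i;
 *      __u64 total = 0;
 *
 *      bpf_map_lookup_elem(map_fd, &key, vals);
 *      for (i = 0; i < ncpus; i++)
 *              total += vals[i];
 */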

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
        /* Wait for any running BPF programs to complete so that
         * userspace, when we return to it, knows that all programs
         * that could be running use the new map value.
         */
        if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
            map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
                synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
                                void *value, __u64 flags)
{
        int err;

        /* Need to create a kthread, thus must support schedule */
        if (bpf_map_is_dev_bound(map)) {
                return bpf_map_offload_update_elem(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                return map->ops->map_update_elem(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
                   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                return sock_map_update_elem_sys(map, key, value, flags);
        } else if (IS_FD_PROG_ARRAY(map)) {
                return bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                    flags);
        }

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_update(map, key, value,
                                                       flags);
        } else if (IS_FD_ARRAY(map)) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
                rcu_read_lock();
                err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
                                                  flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                /* rcu_read_lock() is not needed */
                err = bpf_fd_reuseport_array_update_elem(map, key, value,
                                                         flags);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_push_elem(map, value, flags);
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, flags);
                rcu_read_unlock();
        }
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
                              __u64 flags)
{
        void *ptr;
        int err;

        if (bpf_map_is_dev_bound(map))
                return bpf_map_offload_lookup_elem(map, key, value);

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
                err = bpf_fd_array_map_lookup_elem(map, key, value);
        } else if (IS_FD_HASH(map)) {
                err = bpf_fd_htab_map_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_peek_elem(map, value);
        } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* struct_ops map requires directly updating "value" */
                err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
        } else {
                rcu_read_lock();
                if (map->ops->map_lookup_elem_sys_only)
                        ptr = map->ops->map_lookup_elem_sys_only(map, key);
                else
                        ptr = map->ops->map_lookup_elem(map, key);
                if (IS_ERR(ptr)) {
                        err = PTR_ERR(ptr);
                } else if (!ptr) {
                        err = -ENOENT;
                } else {
                        err = 0;
                        if (flags & BPF_F_LOCK)
                                /* lock 'ptr' and copy everything but lock */
                                copy_map_value_locked(map, value, ptr, true);
                        else
                                copy_map_value(map, value, ptr);
                        /* mask lock, since value wasn't zero inited */
                        check_and_init_map_lock(map, value);
                }
                rcu_read_unlock();
        }

        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
        /* We really just want to fail instead of triggering OOM killer
         * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
         * which is used for lower order allocation requests.
         *
         * It has been observed that higher order allocation requests done by
         * vmalloc with __GFP_NORETRY being set might fail due to not trying
         * to reclaim memory from the page cache, thus we set
         * __GFP_RETRY_MAYFAIL to avoid such situations.
         */

        const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
        unsigned int flags = 0;
        unsigned long align = 1;
        void *area;

        if (size >= SIZE_MAX)
                return NULL;

        /* kmalloc()'ed memory can't be mmap()'ed */
        if (mmapable) {
                BUG_ON(!PAGE_ALIGNED(size));
                align = SHMLBA;
                flags = VM_USERMAP;
        } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
                                    numa_node);
                if (area != NULL)
                        return area;
        }

        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                        gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
                        flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
        kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
        /* Some map creation flags are not tied to the map object but
         * rather to the map fd instead, so they have no meaning upon
         * map object inspection since multiple file descriptors with
         * different (access) properties can exist here. Thus, given
         * this has zero meaning for the map itself, let's clear them
         * here.
         */
        return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
        map->map_type = attr->map_type;
        map->key_size = attr->key_size;
        map->value_size = attr->value_size;
        map->max_entries = attr->max_entries;
        map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
        map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&map_idr_lock);
        id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
        if (id > 0)
                map->id = id;
        spin_unlock_bh(&map_idr_lock);
        idr_preload_end();

        if (WARN_ON_ONCE(!id))
                return -ENOSPC;

        return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
        unsigned long flags;

        /* Offloaded maps are removed from the IDR store when their device
         * disappears - even if someone holds an fd to them they are unusable,
         * the memory is gone, all ops will fail; they are simply waiting for
         * refcnt to drop to be freed.
         */
        if (!map->id)
                return;

        if (do_idr_lock)
                spin_lock_irqsave(&map_idr_lock, flags);
        else
                __acquire(&map_idr_lock);

        idr_remove(&map_idr, map->id);
        map->id = 0;

        if (do_idr_lock)
                spin_unlock_irqrestore(&map_idr_lock, flags);
        else
                __release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
        map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
        mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node)
{
        struct mem_cgroup *old_memcg;
        void *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
        set_active_memcg(old_memcg);

        return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
        struct mem_cgroup *old_memcg;
        void *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = kzalloc(size, flags | __GFP_ACCOUNT);
        set_active_memcg(old_memcg);

        return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags)
{
        struct mem_cgroup *old_memcg;
        void __percpu *ptr;

        old_memcg = set_active_memcg(map->memcg);
        ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
        set_active_memcg(old_memcg);

        return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);

        security_bpf_map_free(map);
        bpf_map_release_memcg(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
        if (atomic64_dec_and_test(&map->usercnt)) {
                if (map->ops->map_release_uref)
                        map->ops->map_release_uref(map);
        }
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
        if (atomic64_dec_and_test(&map->refcnt)) {
                /* bpf_map_free_id() must be called first */
                bpf_map_free_id(map, do_idr_lock);
                btf_put(map->btf);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

void bpf_map_put(struct bpf_map *map)
{
        __bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
        bpf_map_put_uref(map);
        bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_release)
                map->ops->map_release(map, filp);

        bpf_map_put_with_uref(map);
        return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
        fmode_t mode = f.file->f_mode;

        /* Our file permissions may have been overridden by global
         * map permissions facing syscall side.
         */
        if (READ_ONCE(map->frozen))
                mode &= ~FMODE_CAN_WRITE;
        return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and to display
 * reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
        unsigned long size;

        size = round_up(map->key_size + bpf_map_value_size(map), 8);

        return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 type = 0, jited = 0;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                type  = array->aux->type;
                jited = array->aux->jited;
        }

        seq_printf(m,
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%lu\n"
                   "map_id:\t%u\n"
                   "frozen:\t%u\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   bpf_map_memory_footprint(map),
                   map->id,
                   READ_ONCE(map->frozen));
        if (type) {
                seq_printf(m, "owner_prog_type:\t%u\n", type);
                seq_printf(m, "owner_jited:\t%u\n", jited);
        }
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
                              loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_READ.
         */
        return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
                               size_t siz, loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_WRITE.
         */
        return -EINVAL;
}

/* called for any extra memory-mapped region (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt++;
                mutex_unlock(&map->freeze_mutex);
        }
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt--;
                mutex_unlock(&map->freeze_mutex);
        }
}

static const struct vm_operations_struct bpf_map_default_vmops = {
        .open           = bpf_map_mmap_open,
        .close          = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct bpf_map *map = filp->private_data;
        int err;

        if (!map->ops->map_mmap || map_value_has_spin_lock(map))
                return -ENOTSUPP;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        mutex_lock(&map->freeze_mutex);

        if (vma->vm_flags & VM_WRITE) {
                if (map->frozen) {
                        err = -EPERM;
                        goto out;
                }
                /* map is meant to be read-only, so do not allow mapping as
                 * writable, because it's possible to leak a writable page
                 * reference, allowing user-space to still modify the map
                 * after freezing, while the verifier will assume the
                 * contents do not change
                 */
                if (map->map_flags & BPF_F_RDONLY_PROG) {
                        err = -EACCES;
                        goto out;
                }
        }

        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
        vma->vm_flags &= ~VM_MAYEXEC;
        if (!(vma->vm_flags & VM_WRITE))
                /* disallow re-mapping with PROT_WRITE */
                vma->vm_flags &= ~VM_MAYWRITE;

        err = map->ops->map_mmap(map, vma);
        if (err)
                goto out;

        if (vma->vm_flags & VM_MAYWRITE)
                map->writecnt++;
out:
        mutex_unlock(&map->freeze_mutex);
        return err;
}
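
/*
 * Illustrative user-space usage (a sketch, not part of this file): an
 * array map created with BPF_F_MMAPABLE can be mapped directly, subject
 * to the VM_WRITE/frozen checks above:
 *
 *      void *vals = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, map_fd, 0);
 *
 * where mmap_sz is the page-aligned value area size. Mapping with
 * PROT_WRITE fails with -EPERM once the map is frozen, and with -EACCES
 * if the map was created with BPF_F_RDONLY_PROG.
 */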

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_poll)
                return map->ops->map_poll(map, filp, pts);

        return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = bpf_map_show_fdinfo,
#endif
        .release        = bpf_map_release,
        .read           = bpf_dummy_read,
        .write          = bpf_dummy_write,
        .mmap           = bpf_map_mmap,
        .poll           = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
        int ret;

        ret = security_bpf_map(map, OPEN_FMODE(flags));
        if (ret < 0)
                return ret;

        return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
                                flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
        if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
                return -EINVAL;
        if (flags & BPF_F_RDONLY)
                return O_RDONLY;
        if (flags & BPF_F_WRONLY)
                return O_WRONLY;
        return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL

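/*
 * For example, with BPF_MAP_FREEZE_LAST_FIELD defined as map_fd further
 * down in this file, CHECK_ATTR(BPF_MAP_FREEZE) expands (roughly) to:
 *
 *      memchr_inv((void *) &attr->map_fd + sizeof(attr->map_fd), 0,
 *                 sizeof(*attr) - offsetof(union bpf_attr, map_fd) -
 *                 sizeof(attr->map_fd)) != NULL
 *
 * i.e. it evaluates to true (an error) iff any byte past the command's
 * last used field is non-zero.
 */
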
/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
        const char *end = src + size;
        const char *orig_src = src;

        memset(dst, 0, size);
        /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
                if (!isalnum(*src) &&
                    *src != '_' && *src != '.')
                        return -EINVAL;
                *dst++ = *src++;
        }

        /* No '\0' found in "size" number of bytes */
        if (src == end)
                return -EINVAL;

        return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
                     const struct btf *btf,
                     const struct btf_type *key_type,
                     const struct btf_type *value_type)
{
        return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
                         u32 btf_key_id, u32 btf_value_id)
{
        const struct btf_type *key_type, *value_type;
        u32 key_size, value_size;
        int ret = 0;

        /* Some maps allow key to be unspecified. */
        if (btf_key_id) {
                key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
                if (!key_type || key_size != map->key_size)
                        return -EINVAL;
        } else {
                key_type = btf_type_by_id(btf, 0);
                if (!map->ops->map_check_btf)
                        return -EINVAL;
        }

        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
        if (!value_type || value_size != map->value_size)
                return -EINVAL;

        map->spin_lock_off = btf_find_spin_lock(btf, value_type);

        if (map_value_has_spin_lock(map)) {
                if (map->map_flags & BPF_F_RDONLY_PROG)
                        return -EACCES;
                if (map->map_type != BPF_MAP_TYPE_HASH &&
                    map->map_type != BPF_MAP_TYPE_ARRAY &&
                    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
                        return -ENOTSUPP;
                if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
                    map->value_size) {
                        WARN_ONCE(1,
                                  "verifier bug spin_lock_off %d value_size %d\n",
                                  map->spin_lock_off, map->value_size);
                        return -EFAULT;
                }
        }

        if (map->ops->map_check_btf)
                ret = map->ops->map_check_btf(map, btf, key_type, value_type);

        return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_map *map;
        int f_flags;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        if (attr->btf_vmlinux_value_type_id) {
                if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
                    attr->btf_key_type_id || attr->btf_value_type_id)
                        return -EINVAL;
        } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
                return -EINVAL;
        }

        f_flags = bpf_get_file_flag(attr->map_flags);
        if (f_flags < 0)
                return f_flags;

        if (numa_node != NUMA_NO_NODE &&
            ((unsigned int)numa_node >= nr_node_ids ||
             !node_online(numa_node)))
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = bpf_obj_name_cpy(map->name, attr->map_name,
                               sizeof(attr->map_name));
        if (err < 0)
                goto free_map;

        atomic64_set(&map->refcnt, 1);
        atomic64_set(&map->usercnt, 1);
        mutex_init(&map->freeze_mutex);

        map->spin_lock_off = -EINVAL;
        if (attr->btf_key_type_id || attr->btf_value_type_id ||
            /* Even if the map's value is a kernel struct,
             * the bpf_prog.o must have BTF to begin with
             * to figure out the corresponding kernel
             * counterpart.  Thus, attr->btf_fd has
             * to be valid also.
             */
            attr->btf_vmlinux_value_type_id) {
                struct btf *btf;

                btf = btf_get_by_fd(attr->btf_fd);
                if (IS_ERR(btf)) {
                        err = PTR_ERR(btf);
                        goto free_map;
                }
                if (btf_is_kernel(btf)) {
                        btf_put(btf);
                        err = -EACCES;
                        goto free_map;
                }
                map->btf = btf;

                if (attr->btf_value_type_id) {
                        err = map_check_btf(map, btf, attr->btf_key_type_id,
                                            attr->btf_value_type_id);
                        if (err)
                                goto free_map;
                }

                map->btf_key_type_id = attr->btf_key_type_id;
                map->btf_value_type_id = attr->btf_value_type_id;
                map->btf_vmlinux_value_type_id =
                        attr->btf_vmlinux_value_type_id;
        }

        err = security_bpf_map_alloc(map);
        if (err)
                goto free_map;

        err = bpf_map_alloc_id(map);
        if (err)
                goto free_map_sec;

        bpf_map_save_memcg(map);

        err = bpf_map_new_fd(map, f_flags);
        if (err < 0) {
                /* failed to allocate fd.
                 * bpf_map_put_with_uref() is needed because the above
                 * bpf_map_alloc_id() has published the map
                 * to userspace, and userspace may already
                 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
                 */
                bpf_map_put_with_uref(map);
                return err;
        }

        return err;

free_map_sec:
        security_bpf_map_free(map);
free_map:
        btf_put(map->btf);
        map->ops->map_free(map);
        return err;
}
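
/*
 * Minimal user-space sketch of this command (illustrative only; the fd
 * and names are hypothetical):
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_HASH,
 *              .key_size    = sizeof(__u32),
 *              .value_size  = sizeof(__u64),
 *              .max_entries = 1024,
 *      };
 *
 *      strncpy(attr.map_name, "example_map", sizeof(attr.map_name) - 1);
 *      map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd holds the references taken above (refcnt
 * and usercnt both 1); the map is freed once all references are gone.
 */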

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
        atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc(map);
        fdput(f);

        return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc_with_uref(map);
        fdput(f);

        return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
        int refold;

        refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
        if (!refold)
                return ERR_PTR(-ENOENT);
        if (uref)
                atomic64_inc(&map->usercnt);

        return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
        spin_lock_bh(&map_idr_lock);
        map = __bpf_map_inc_not_zero(map, false);
        spin_unlock_bh(&map_idr_lock);

        return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
        if (key_size)
                return memdup_user(ukey, key_size);

        if (ukey)
                return ERR_PTR(-EINVAL);

        return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        if (attr->flags & ~BPF_F_LOCK)
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = bpf_map_value_size(map);

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = bpf_map_copy_value(map, key, value, attr->flags);
        if (err)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
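
/*
 * Illustrative user-space counterpart (sketch):
 *
 *      union bpf_attr attr = {
 *              .map_fd = map_fd,
 *              .key    = (__u64)(unsigned long)&key,
 *              .value  = (__u64)(unsigned long)&value,
 *      };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * A missing key yields -ENOENT; setting BPF_F_LOCK in attr.flags copies
 * the value without the struct bpf_spin_lock field, as implemented in
 * bpf_map_copy_value() above.
 */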

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
            map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;

        err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
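
/*
 * Illustrative user-space counterpart (sketch); BPF_ANY, BPF_NOEXIST and
 * BPF_EXIST are the element flags accepted by most map types:
 *
 *      union bpf_attr attr = {
 *              .map_fd = map_fd,
 *              .key    = (__u64)(unsigned long)&key,
 *              .value  = (__u64)(unsigned long)&value,
 *              .flags  = BPF_NOEXIST,
 *      };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */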

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_delete_elem(map, key);
                goto out;
        } else if (IS_FD_PROG_ARRAY(map) ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* These maps require sleepable context */
                err = map->ops->map_delete_elem(map, key);
                goto out;
        }

        bpf_disable_instrumentation();
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);
out:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *unext_key = u64_to_user_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *next_key;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if (ukey) {
                key = __bpf_copy_key(ukey, map->key_size);
                if (IS_ERR(key)) {
                        err = PTR_ERR(key);
                        goto err_put;
                }
        } else {
                key = NULL;
        }

        err = -ENOMEM;
        next_key = kmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_get_next_key(map, key, next_key);
                goto out;
        }

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
out:
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        err = 0;

free_next_key:
        kfree(next_key);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 cp, max_count;
        int err = 0;
        void *key;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                                   map->key_size))
                        break;

                if (bpf_map_is_dev_bound(map)) {
                        err = bpf_map_offload_delete_elem(map, key);
                        break;
                }

                bpf_disable_instrumentation();
                rcu_read_lock();
                err = map->ops->map_delete_elem(map, key);
                rcu_read_unlock();
                bpf_enable_instrumentation();
                maybe_wait_bpf_programs(map);
                if (err)
                        break;
        }
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        kfree(key);
        return err;
}

int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 value_size, cp, max_count;
        int ufd = attr->batch.map_fd;
        void *key, *value;
        struct fd f;
        int err = 0;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value) {
                kfree(key);
                return -ENOMEM;
        }

        /* take the fd reference only once all early returns are done, and
         * drop it before returning; bpf_map_update_value() needs f.file
         * for the fd-array map types
         */
        f = fdget(ufd);
        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                    map->key_size) ||
                    copy_from_user(value, values + cp * value_size, value_size))
                        break;

                err = bpf_map_update_value(map, f, key, value,
                                           attr->batch.elem_flags);

                if (err)
                        break;
        }

        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        fdput(f);
        kfree(value);
        kfree(key);
        return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
        void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        void *buf, *buf_prevkey, *prev_key, *key, *value;
        int err, retry = MAP_LOOKUP_RETRIES;
        u32 value_size, cp, max_count;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map))
                return -EINVAL;

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        if (put_user(0, &uattr->batch.count))
                return -EFAULT;

        buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!buf_prevkey)
                return -ENOMEM;

        buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
        if (!buf) {
                kfree(buf_prevkey);
                return -ENOMEM;
        }

        err = -EFAULT;
        prev_key = NULL;
        if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
                goto free_buf;
        key = buf;
        value = key + map->key_size;
        if (ubatch)
                prev_key = buf_prevkey;

        for (cp = 0; cp < max_count;) {
                rcu_read_lock();
                err = map->ops->map_get_next_key(map, prev_key, key);
                rcu_read_unlock();
                if (err)
                        break;
                err = bpf_map_copy_value(map, key, value,
                                         attr->batch.elem_flags);

                if (err == -ENOENT) {
                        if (retry) {
                                retry--;
                                continue;
                        }
                        err = -EINTR;
                        break;
                }

                if (err)
                        goto free_buf;

                if (copy_to_user(keys + cp * map->key_size, key,
                                 map->key_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }
                if (copy_to_user(values + cp * value_size, value, value_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }

                if (!prev_key)
                        prev_key = buf_prevkey;

                swap(prev_key, key);
                retry = MAP_LOOKUP_RETRIES;
                cp++;
        }

        if (err == -EFAULT)
                goto free_buf;

        if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
                    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
                err = -EFAULT;

free_buf:
        kfree(buf_prevkey);
        kfree(buf);
        return err;
}
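
/*
 * Illustrative user-space loop (a sketch, assuming libbpf's
 * bpf_map_lookup_batch() wrapper). For this generic implementation the
 * in_batch/out_batch cookies are key-sized buffers carrying the resume
 * position, and -ENOENT signals that the whole map has been traversed:
 *
 *      __u32 count;
 *      void *in = NULL;
 *
 *      do {
 *              count = CHUNK;
 *              err = bpf_map_lookup_batch(map_fd, in, out_batch,
 *                                         keys, values, &count, NULL);
 *              consume(keys, values, count);
 *              in = out_batch;
 *      } while (!err);
 */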

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
            !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_QUEUE ||
            map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_pop_elem(map, value);
        } else {
                err = -ENOTSUPP;
        }

        if (err)
                goto free_value;

        if (copy_to_user(uvalue, value, value_size) != 0) {
                err = -EFAULT;
                goto free_value;
        }

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
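
/*
 * Illustrative user-space usage: for BPF_MAP_TYPE_QUEUE and
 * BPF_MAP_TYPE_STACK this command is the "pop" operation; attr.key is
 * left at 0 since these maps have no keys:
 *
 *      union bpf_attr attr = {
 *              .map_fd = queue_fd,
 *              .value  = (__u64)(unsigned long)&value,
 *      };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM,
 *                    &attr, sizeof(attr));
 *
 * Other map types return -ENOTSUPP here in this version.
 */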

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
        int err = 0, ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;

        if (CHECK_ATTR(BPF_MAP_FREEZE))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                fdput(f);
                return -ENOTSUPP;
        }

        mutex_lock(&map->freeze_mutex);

        if (map->writecnt) {
                err = -EBUSY;
                goto err_put;
        }
        if (READ_ONCE(map->frozen)) {
                err = -EBUSY;
                goto err_put;
        }
        if (!bpf_capable()) {
                err = -EPERM;
                goto err_put;
        }

        WRITE_ONCE(map->frozen, true);
err_put:
        mutex_unlock(&map->freeze_mutex);
        fdput(f);
        return err;
}
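
/*
 * Illustrative user-space usage (sketch):
 *
 *      union bpf_attr attr = { .map_fd = map_fd };
 *
 *      err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 *
 * After a successful freeze, map_get_sys_perms() masks FMODE_CAN_WRITE
 * for every fd, so syscall-side writes such as BPF_MAP_UPDATE_ELEM fail
 * with -EPERM, while lookups and BPF-program-side reads keep working.
 * The writecnt check above refuses to freeze a map that still has
 * writable memory mappings.
 */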
1579
1580 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1581 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1582         [_id] = & _name ## _prog_ops,
1583 #define BPF_MAP_TYPE(_id, _ops)
1584 #define BPF_LINK_TYPE(_id, _name)
1585 #include <linux/bpf_types.h>
1586 #undef BPF_PROG_TYPE
1587 #undef BPF_MAP_TYPE
1588 #undef BPF_LINK_TYPE
1589 };
1590
1591 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1592 {
1593         const struct bpf_prog_ops *ops;
1594
1595         if (type >= ARRAY_SIZE(bpf_prog_types))
1596                 return -EINVAL;
1597         type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1598         ops = bpf_prog_types[type];
1599         if (!ops)
1600                 return -EINVAL;
1601
1602         if (!bpf_prog_is_dev_bound(prog->aux))
1603                 prog->aux->ops = ops;
1604         else
1605                 prog->aux->ops = &bpf_offload_prog_ops;
1606         prog->type = type;
1607         return 0;
1608 }
1609
1610 enum bpf_audit {
1611         BPF_AUDIT_LOAD,
1612         BPF_AUDIT_UNLOAD,
1613         BPF_AUDIT_MAX,
1614 };
1615
1616 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1617         [BPF_AUDIT_LOAD]   = "LOAD",
1618         [BPF_AUDIT_UNLOAD] = "UNLOAD",
1619 };
1620
1621 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1622 {
1623         struct audit_context *ctx = NULL;
1624         struct audit_buffer *ab;
1625
1626         if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1627                 return;
1628         if (audit_enabled == AUDIT_OFF)
1629                 return;
1630         if (op == BPF_AUDIT_LOAD)
1631                 ctx = audit_context();
1632         ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1633         if (unlikely(!ab))
1634                 return;
1635         audit_log_format(ab, "prog-id=%u op=%s",
1636                          prog->aux->id, bpf_audit_str[op]);
1637         audit_log_end(ab);
1638 }
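
/* The resulting audit record looks like (field values illustrative):
 *
 *	type=BPF msg=audit(1621234567.890:321): prog-id=42 op=LOAD
 *
 * Only LOAD is tied to the loading task's syscall context via
 * audit_context(); UNLOAD can be emitted from deferred freeing, so its
 * context is deliberately left NULL.
 */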
1639
1640 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1641 {
1642         int id;
1643
1644         idr_preload(GFP_KERNEL);
1645         spin_lock_bh(&prog_idr_lock);
1646         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1647         if (id > 0)
1648                 prog->aux->id = id;
1649         spin_unlock_bh(&prog_idr_lock);
1650         idr_preload_end();
1651
1652         /* id is in [1, INT_MAX) */
1653         if (WARN_ON_ONCE(!id))
1654                 return -ENOSPC;
1655
1656         return id > 0 ? 0 : id;
1657 }
1658
1659 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1660 {
1661         /* cBPF to eBPF migrations are currently not in the idr store.
1662          * Offloaded programs are removed from the store when their device
1663          * disappears; even if someone grabs an fd to them, they are unusable,
1664          * simply waiting for the refcnt to drop so they can be freed.
1665          */
1666         if (!prog->aux->id)
1667                 return;
1668
1669         if (do_idr_lock)
1670                 spin_lock_bh(&prog_idr_lock);
1671         else
1672                 __acquire(&prog_idr_lock);
1673
1674         idr_remove(&prog_idr, prog->aux->id);
1675         prog->aux->id = 0;
1676
1677         if (do_idr_lock)
1678                 spin_unlock_bh(&prog_idr_lock);
1679         else
1680                 __release(&prog_idr_lock);
1681 }
1682
1683 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1684 {
1685         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1686
1687         kvfree(aux->func_info);
1688         kfree(aux->func_info_aux);
1689         free_uid(aux->user);
1690         security_bpf_prog_free(aux);
1691         bpf_prog_free(aux->prog);
1692 }
1693
1694 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1695 {
1696         bpf_prog_kallsyms_del_all(prog);
1697         btf_put(prog->aux->btf);
1698         kvfree(prog->aux->jited_linfo);
1699         kvfree(prog->aux->linfo);
1700         kfree(prog->aux->kfunc_tab);
1701         if (prog->aux->attach_btf)
1702                 btf_put(prog->aux->attach_btf);
1703
1704         if (deferred) {
1705                 if (prog->aux->sleepable)
1706                         call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1707                 else
1708                         call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1709         } else {
1710                 __bpf_prog_put_rcu(&prog->aux->rcu);
1711         }
1712 }
1713
1714 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1715 {
1716         if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1717                 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1718                 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1719                 /* bpf_prog_free_id() must be called first */
1720                 bpf_prog_free_id(prog, do_idr_lock);
1721                 __bpf_prog_put_noref(prog, true);
1722         }
1723 }
1724
1725 void bpf_prog_put(struct bpf_prog *prog)
1726 {
1727         __bpf_prog_put(prog, true);
1728 }
1729 EXPORT_SYMBOL_GPL(bpf_prog_put);
1730
1731 static int bpf_prog_release(struct inode *inode, struct file *filp)
1732 {
1733         struct bpf_prog *prog = filp->private_data;
1734
1735         bpf_prog_put(prog);
1736         return 0;
1737 }
1738
1739 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1740                                struct bpf_prog_stats *stats)
1741 {
1742         u64 nsecs = 0, cnt = 0, misses = 0;
1743         int cpu;
1744
1745         for_each_possible_cpu(cpu) {
1746                 const struct bpf_prog_stats *st;
1747                 unsigned int start;
1748                 u64 tnsecs, tcnt, tmisses;
1749
1750                 st = per_cpu_ptr(prog->stats, cpu);
1751                 do {
1752                         start = u64_stats_fetch_begin_irq(&st->syncp);
1753                         tnsecs = st->nsecs;
1754                         tcnt = st->cnt;
1755                         tmisses = st->misses;
1756                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1757                 nsecs += tnsecs;
1758                 cnt += tcnt;
1759                 misses += tmisses;
1760         }
1761         stats->nsecs = nsecs;
1762         stats->cnt = cnt;
1763         stats->misses = misses;
1764 }
1765
1766 #ifdef CONFIG_PROC_FS
1767 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1768 {
1769         const struct bpf_prog *prog = filp->private_data;
1770         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1771         struct bpf_prog_stats stats;
1772
1773         bpf_prog_get_stats(prog, &stats);
1774         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1775         seq_printf(m,
1776                    "prog_type:\t%u\n"
1777                    "prog_jited:\t%u\n"
1778                    "prog_tag:\t%s\n"
1779                    "memlock:\t%llu\n"
1780                    "prog_id:\t%u\n"
1781                    "run_time_ns:\t%llu\n"
1782                    "run_cnt:\t%llu\n"
1783                    "recursion_misses:\t%llu\n",
1784                    prog->type,
1785                    prog->jited,
1786                    prog_tag,
1787                    prog->pages * 1ULL << PAGE_SHIFT,
1788                    prog->aux->id,
1789                    stats.nsecs,
1790                    stats.cnt,
1791                    stats.misses);
1792 }
1793 #endif
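
/* With CONFIG_PROC_FS, the above is what a read of /proc/<pid>/fdinfo/<fd>
 * returns for a program fd (values illustrative):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	f8a6b02d1a7dbf66
 *	memlock:	4096
 *	prog_id:	42
 *	run_time_ns:	0
 *	run_cnt:	0
 *	recursion_misses:	0
 *
 * run_time_ns and run_cnt stay zero unless runtime stats collection is
 * enabled, e.g. through the kernel.bpf_stats_enabled sysctl.
 */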
1794
1795 const struct file_operations bpf_prog_fops = {
1796 #ifdef CONFIG_PROC_FS
1797         .show_fdinfo    = bpf_prog_show_fdinfo,
1798 #endif
1799         .release        = bpf_prog_release,
1800         .read           = bpf_dummy_read,
1801         .write          = bpf_dummy_write,
1802 };
1803
1804 int bpf_prog_new_fd(struct bpf_prog *prog)
1805 {
1806         int ret;
1807
1808         ret = security_bpf_prog(prog);
1809         if (ret < 0)
1810                 return ret;
1811
1812         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1813                                 O_RDWR | O_CLOEXEC);
1814 }
1815
1816 static struct bpf_prog *____bpf_prog_get(struct fd f)
1817 {
1818         if (!f.file)
1819                 return ERR_PTR(-EBADF);
1820         if (f.file->f_op != &bpf_prog_fops) {
1821                 fdput(f);
1822                 return ERR_PTR(-EINVAL);
1823         }
1824
1825         return f.file->private_data;
1826 }
1827
1828 void bpf_prog_add(struct bpf_prog *prog, int i)
1829 {
1830         atomic64_add(i, &prog->aux->refcnt);
1831 }
1832 EXPORT_SYMBOL_GPL(bpf_prog_add);
1833
1834 void bpf_prog_sub(struct bpf_prog *prog, int i)
1835 {
1836         /* Only to be used for undoing a previous bpf_prog_add() in some
1837          * error path. We still know that another entity in our call
1838          * path holds a reference to the program, thus atomic64_sub_return()
1839          * can be used safely here.
1840          */
1841         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1842 }
1843 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1844
1845 void bpf_prog_inc(struct bpf_prog *prog)
1846 {
1847         atomic64_inc(&prog->aux->refcnt);
1848 }
1849 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1850
1851 /* prog_idr_lock should have been held */
1852 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1853 {
1854         int refold;
1855
1856         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1857
1858         if (!refold)
1859                 return ERR_PTR(-ENOENT);
1860
1861         return prog;
1862 }
1863 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1864
1865 bool bpf_prog_get_ok(struct bpf_prog *prog,
1866                             enum bpf_prog_type *attach_type, bool attach_drv)
1867 {
1868         /* not an attachment, just a refcount inc, always allow */
1869         if (!attach_type)
1870                 return true;
1871
1872         if (prog->type != *attach_type)
1873                 return false;
1874         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1875                 return false;
1876
1877         return true;
1878 }
1879
1880 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1881                                        bool attach_drv)
1882 {
1883         struct fd f = fdget(ufd);
1884         struct bpf_prog *prog;
1885
1886         prog = ____bpf_prog_get(f);
1887         if (IS_ERR(prog))
1888                 return prog;
1889         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1890                 prog = ERR_PTR(-EINVAL);
1891                 goto out;
1892         }
1893
1894         bpf_prog_inc(prog);
1895 out:
1896         fdput(f);
1897         return prog;
1898 }
1899
1900 struct bpf_prog *bpf_prog_get(u32 ufd)
1901 {
1902         return __bpf_prog_get(ufd, NULL, false);
1903 }
1904
1905 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1906                                        bool attach_drv)
1907 {
1908         return __bpf_prog_get(ufd, &type, attach_drv);
1909 }
1910 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1911
1912 /* Initially all BPF programs could be loaded without specifying
1913  * expected_attach_type. Later, for some of them, specifying expected_attach_type
1914  * at load time became required so that the program could be validated properly.
1915  * Program types that are allowed to be loaded both with and without (for
1916  * backward compatibility) expected_attach_type should have the default attach
1917  * type assigned to expected_attach_type for the latter case, so that it can be
1918  * validated later at attach time.
1919  *
1920  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if the
1921  * prog type requires it but has some attach types that have to stay backward
1922  * compatible.
1923  */
1924 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1925 {
1926         switch (attr->prog_type) {
1927         case BPF_PROG_TYPE_CGROUP_SOCK:
1928                 /* Unfortunately, a BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1929                  * exist, so checking for non-zero is the way to go here.
1930                  */
1931                 if (!attr->expected_attach_type)
1932                         attr->expected_attach_type =
1933                                 BPF_CGROUP_INET_SOCK_CREATE;
1934                 break;
1935         }
1936 }
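
/* Concretely (illustrative): a legacy loader that leaves the field unset,
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
 *	attr.expected_attach_type = 0;	// old user space never set this
 *
 * is treated as if it had passed BPF_CGROUP_INET_SOCK_CREATE, giving the
 * attach-time validation a concrete type to check against.
 */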
1937
1938 static int
1939 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1940                            enum bpf_attach_type expected_attach_type,
1941                            struct btf *attach_btf, u32 btf_id,
1942                            struct bpf_prog *dst_prog)
1943 {
1944         if (btf_id) {
1945                 if (btf_id > BTF_MAX_TYPE)
1946                         return -EINVAL;
1947
1948                 if (!attach_btf && !dst_prog)
1949                         return -EINVAL;
1950
1951                 switch (prog_type) {
1952                 case BPF_PROG_TYPE_TRACING:
1953                 case BPF_PROG_TYPE_LSM:
1954                 case BPF_PROG_TYPE_STRUCT_OPS:
1955                 case BPF_PROG_TYPE_EXT:
1956                         break;
1957                 default:
1958                         return -EINVAL;
1959                 }
1960         }
1961
1962         if (attach_btf && (!btf_id || dst_prog))
1963                 return -EINVAL;
1964
1965         if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
1966             prog_type != BPF_PROG_TYPE_EXT)
1967                 return -EINVAL;
1968
1969         switch (prog_type) {
1970         case BPF_PROG_TYPE_CGROUP_SOCK:
1971                 switch (expected_attach_type) {
1972                 case BPF_CGROUP_INET_SOCK_CREATE:
1973                 case BPF_CGROUP_INET_SOCK_RELEASE:
1974                 case BPF_CGROUP_INET4_POST_BIND:
1975                 case BPF_CGROUP_INET6_POST_BIND:
1976                         return 0;
1977                 default:
1978                         return -EINVAL;
1979                 }
1980         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1981                 switch (expected_attach_type) {
1982                 case BPF_CGROUP_INET4_BIND:
1983                 case BPF_CGROUP_INET6_BIND:
1984                 case BPF_CGROUP_INET4_CONNECT:
1985                 case BPF_CGROUP_INET6_CONNECT:
1986                 case BPF_CGROUP_INET4_GETPEERNAME:
1987                 case BPF_CGROUP_INET6_GETPEERNAME:
1988                 case BPF_CGROUP_INET4_GETSOCKNAME:
1989                 case BPF_CGROUP_INET6_GETSOCKNAME:
1990                 case BPF_CGROUP_UDP4_SENDMSG:
1991                 case BPF_CGROUP_UDP6_SENDMSG:
1992                 case BPF_CGROUP_UDP4_RECVMSG:
1993                 case BPF_CGROUP_UDP6_RECVMSG:
1994                         return 0;
1995                 default:
1996                         return -EINVAL;
1997                 }
1998         case BPF_PROG_TYPE_CGROUP_SKB:
1999                 switch (expected_attach_type) {
2000                 case BPF_CGROUP_INET_INGRESS:
2001                 case BPF_CGROUP_INET_EGRESS:
2002                         return 0;
2003                 default:
2004                         return -EINVAL;
2005                 }
2006         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2007                 switch (expected_attach_type) {
2008                 case BPF_CGROUP_SETSOCKOPT:
2009                 case BPF_CGROUP_GETSOCKOPT:
2010                         return 0;
2011                 default:
2012                         return -EINVAL;
2013                 }
2014         case BPF_PROG_TYPE_SK_LOOKUP:
2015                 if (expected_attach_type == BPF_SK_LOOKUP)
2016                         return 0;
2017                 return -EINVAL;
2018         case BPF_PROG_TYPE_EXT:
2019                 if (expected_attach_type)
2020                         return -EINVAL;
2021                 fallthrough;
2022         default:
2023                 return 0;
2024         }
2025 }
2026
2027 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2028 {
2029         switch (prog_type) {
2030         case BPF_PROG_TYPE_SCHED_CLS:
2031         case BPF_PROG_TYPE_SCHED_ACT:
2032         case BPF_PROG_TYPE_XDP:
2033         case BPF_PROG_TYPE_LWT_IN:
2034         case BPF_PROG_TYPE_LWT_OUT:
2035         case BPF_PROG_TYPE_LWT_XMIT:
2036         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2037         case BPF_PROG_TYPE_SK_SKB:
2038         case BPF_PROG_TYPE_SK_MSG:
2039         case BPF_PROG_TYPE_LIRC_MODE2:
2040         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2041         case BPF_PROG_TYPE_CGROUP_DEVICE:
2042         case BPF_PROG_TYPE_CGROUP_SOCK:
2043         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2044         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2045         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2046         case BPF_PROG_TYPE_SOCK_OPS:
2047         case BPF_PROG_TYPE_EXT: /* extends any prog */
2048                 return true;
2049         case BPF_PROG_TYPE_CGROUP_SKB:
2050                 /* always unpriv */
2051         case BPF_PROG_TYPE_SK_REUSEPORT:
2052                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2053         default:
2054                 return false;
2055         }
2056 }
2057
2058 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2059 {
2060         switch (prog_type) {
2061         case BPF_PROG_TYPE_KPROBE:
2062         case BPF_PROG_TYPE_TRACEPOINT:
2063         case BPF_PROG_TYPE_PERF_EVENT:
2064         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2065         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2066         case BPF_PROG_TYPE_TRACING:
2067         case BPF_PROG_TYPE_LSM:
2068         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2069         case BPF_PROG_TYPE_EXT: /* extends any prog */
2070                 return true;
2071         default:
2072                 return false;
2073         }
2074 }
2075
2076 /* last field in 'union bpf_attr' used by this command */
2077 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2078
2079 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2080 {
2081         enum bpf_prog_type type = attr->prog_type;
2082         struct bpf_prog *prog, *dst_prog = NULL;
2083         struct btf *attach_btf = NULL;
2084         int err;
2085         char license[128];
2086         bool is_gpl;
2087
2088         if (CHECK_ATTR(BPF_PROG_LOAD))
2089                 return -EINVAL;
2090
2091         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2092                                  BPF_F_ANY_ALIGNMENT |
2093                                  BPF_F_TEST_STATE_FREQ |
2094                                  BPF_F_SLEEPABLE |
2095                                  BPF_F_TEST_RND_HI32))
2096                 return -EINVAL;
2097
2098         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2099             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2100             !bpf_capable())
2101                 return -EPERM;
2102
2103         /* copy eBPF program license from user space */
2104         if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2105                               sizeof(license) - 1) < 0)
2106                 return -EFAULT;
2107         license[sizeof(license) - 1] = 0;
2108
2109         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2110         is_gpl = license_is_gpl_compatible(license);
2111
2112         if (attr->insn_cnt == 0 ||
2113             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2114                 return -E2BIG;
2115         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2116             type != BPF_PROG_TYPE_CGROUP_SKB &&
2117             !bpf_capable())
2118                 return -EPERM;
2119
2120         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2121                 return -EPERM;
2122         if (is_perfmon_prog_type(type) && !perfmon_capable())
2123                 return -EPERM;
2124
2125         /* attach_prog_fd/attach_btf_obj_fd can specify the fd of either a
2126          * bpf_prog or a btf, so we need to check which one it is
2127          */
2128         if (attr->attach_prog_fd) {
2129                 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2130                 if (IS_ERR(dst_prog)) {
2131                         dst_prog = NULL;
2132                         attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2133                         if (IS_ERR(attach_btf))
2134                                 return -EINVAL;
2135                         if (!btf_is_kernel(attach_btf)) {
2136                                 /* attaching through specifying bpf_prog's BTF
2137                                  * objects directly might be supported eventually
2138                                  */
2139                                 btf_put(attach_btf);
2140                                 return -ENOTSUPP;
2141                         }
2142                 }
2143         } else if (attr->attach_btf_id) {
2144                 /* fall back to vmlinux BTF if only a BTF type ID is specified */
2145                 attach_btf = bpf_get_btf_vmlinux();
2146                 if (IS_ERR(attach_btf))
2147                         return PTR_ERR(attach_btf);
2148                 if (!attach_btf)
2149                         return -EINVAL;
2150                 btf_get(attach_btf);
2151         }
2152
2153         bpf_prog_load_fixup_attach_type(attr);
2154         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2155                                        attach_btf, attr->attach_btf_id,
2156                                        dst_prog)) {
2157                 if (dst_prog)
2158                         bpf_prog_put(dst_prog);
2159                 if (attach_btf)
2160                         btf_put(attach_btf);
2161                 return -EINVAL;
2162         }
2163
2164         /* plain bpf_prog allocation */
2165         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2166         if (!prog) {
2167                 if (dst_prog)
2168                         bpf_prog_put(dst_prog);
2169                 if (attach_btf)
2170                         btf_put(attach_btf);
2171                 return -ENOMEM;
2172         }
2173
2174         prog->expected_attach_type = attr->expected_attach_type;
2175         prog->aux->attach_btf = attach_btf;
2176         prog->aux->attach_btf_id = attr->attach_btf_id;
2177         prog->aux->dst_prog = dst_prog;
2178         prog->aux->offload_requested = !!attr->prog_ifindex;
2179         prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2180
2181         err = security_bpf_prog_alloc(prog->aux);
2182         if (err)
2183                 goto free_prog;
2184
2185         prog->aux->user = get_current_user();
2186         prog->len = attr->insn_cnt;
2187
2188         err = -EFAULT;
2189         if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2190                            bpf_prog_insn_size(prog)) != 0)
2191                 goto free_prog_sec;
2192
2193         prog->orig_prog = NULL;
2194         prog->jited = 0;
2195
2196         atomic64_set(&prog->aux->refcnt, 1);
2197         prog->gpl_compatible = is_gpl ? 1 : 0;
2198
2199         if (bpf_prog_is_dev_bound(prog->aux)) {
2200                 err = bpf_prog_offload_init(prog, attr);
2201                 if (err)
2202                         goto free_prog_sec;
2203         }
2204
2205         /* find program type: socket_filter vs tracing_filter */
2206         err = find_prog_type(type, prog);
2207         if (err < 0)
2208                 goto free_prog_sec;
2209
2210         prog->aux->load_time = ktime_get_boottime_ns();
2211         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2212                                sizeof(attr->prog_name));
2213         if (err < 0)
2214                 goto free_prog_sec;
2215
2216         /* run eBPF verifier */
2217         err = bpf_check(&prog, attr, uattr);
2218         if (err < 0)
2219                 goto free_used_maps;
2220
2221         prog = bpf_prog_select_runtime(prog, &err);
2222         if (err < 0)
2223                 goto free_used_maps;
2224
2225         err = bpf_prog_alloc_id(prog);
2226         if (err)
2227                 goto free_used_maps;
2228
2229         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2230          * effectively publicly exposed. However, retrieving via
2231          * bpf_prog_get_fd_by_id() will take another reference,
2232          * therefore it cannot be gone underneath us.
2233          *
2234          * Only for the time /after/ successful bpf_prog_new_fd()
2235          * and before returning to userspace, we might just hold
2236          * one reference and any parallel close on that fd could
2237          * rip everything out. Hence, below notifications must
2238          * happen before bpf_prog_new_fd().
2239          *
2240          * Also, any failure handling from this point onwards must
2241          * be using bpf_prog_put() given the program is exposed.
2242          */
2243         bpf_prog_kallsyms_add(prog);
2244         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2245         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2246
2247         err = bpf_prog_new_fd(prog);
2248         if (err < 0)
2249                 bpf_prog_put(prog);
2250         return err;
2251
2252 free_used_maps:
2253         /* In case we have subprogs, we need to wait for a grace
2254          * period before we can tear down JIT memory since symbols
2255          * are already exposed under kallsyms.
2256          */
2257         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2258         return err;
2259 free_prog_sec:
2260         free_uid(prog->aux->user);
2261         security_bpf_prog_free(prog->aux);
2262 free_prog:
2263         if (prog->aux->attach_btf)
2264                 btf_put(prog->aux->attach_btf);
2265         bpf_prog_free(prog);
2266         return err;
2267 }
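
/* A minimal user-space sketch of BPF_PROG_LOAD (illustrative only; assumes
 * <linux/bpf.h>, <sys/syscall.h> and a small assumed helper ptr_to_u64()
 * that casts a pointer to __u64):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = ptr_to_u64(insns);
 *	attr.insn_cnt  = 2;
 *	attr.license   = ptr_to_u64("GPL");
 *
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * On success the returned fd owns the single reference taken above, and
 * close(prog_fd) drops it via bpf_prog_release().
 */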
2268
2269 #define BPF_OBJ_LAST_FIELD file_flags
2270
2271 static int bpf_obj_pin(const union bpf_attr *attr)
2272 {
2273         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2274                 return -EINVAL;
2275
2276         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2277 }
2278
2279 static int bpf_obj_get(const union bpf_attr *attr)
2280 {
2281         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2282             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2283                 return -EINVAL;
2284
2285         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2286                                 attr->file_flags);
2287 }
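
/* A user-space sketch of pinning and re-opening an object (illustrative;
 * ptr_to_u64() is the same assumed helper as above, and bpffs is assumed to
 * be mounted at the conventional /sys/fs/bpf):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.bpf_fd   = prog_fd;
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_prog");
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_prog");
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 *
 * The pinned path holds its own reference, so the object outlives the
 * loader process until the path is unlinked.
 */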
2288
2289 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2290                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2291 {
2292         atomic64_set(&link->refcnt, 1);
2293         link->type = type;
2294         link->id = 0;
2295         link->ops = ops;
2296         link->prog = prog;
2297 }
2298
2299 static void bpf_link_free_id(int id)
2300 {
2301         if (!id)
2302                 return;
2303
2304         spin_lock_bh(&link_idr_lock);
2305         idr_remove(&link_idr, id);
2306         spin_unlock_bh(&link_idr_lock);
2307 }
2308
2309 /* Clean up bpf_link and its corresponding anon_inode file and FD. After
2310  * the anon_inode is created, bpf_link can't be just kfree()'d due to the
2311  * deferred anon_inode release() call. This helper marks bpf_link as
2312  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2313  * refcnt is not decremented; that is the responsibility of the calling code
2314  * that failed to complete bpf_link initialization.
2315  */
2316 void bpf_link_cleanup(struct bpf_link_primer *primer)
2317 {
2318         primer->link->prog = NULL;
2319         bpf_link_free_id(primer->id);
2320         fput(primer->file);
2321         put_unused_fd(primer->fd);
2322 }
2323
2324 void bpf_link_inc(struct bpf_link *link)
2325 {
2326         atomic64_inc(&link->refcnt);
2327 }
2328
2329 /* bpf_link_free is guaranteed to be called from process context */
2330 static void bpf_link_free(struct bpf_link *link)
2331 {
2332         bpf_link_free_id(link->id);
2333         if (link->prog) {
2334                 /* detach BPF program, clean up used resources */
2335                 link->ops->release(link);
2336                 bpf_prog_put(link->prog);
2337         }
2338         /* free bpf_link and its containing memory */
2339         link->ops->dealloc(link);
2340 }
2341
2342 static void bpf_link_put_deferred(struct work_struct *work)
2343 {
2344         struct bpf_link *link = container_of(work, struct bpf_link, work);
2345
2346         bpf_link_free(link);
2347 }
2348
2349 /* bpf_link_put can be called from atomic context, but ensures that resources
2350  * are freed from process context
2351  */
2352 void bpf_link_put(struct bpf_link *link)
2353 {
2354         if (!atomic64_dec_and_test(&link->refcnt))
2355                 return;
2356
2357         if (in_atomic()) {
2358                 INIT_WORK(&link->work, bpf_link_put_deferred);
2359                 schedule_work(&link->work);
2360         } else {
2361                 bpf_link_free(link);
2362         }
2363 }
2364
2365 static int bpf_link_release(struct inode *inode, struct file *filp)
2366 {
2367         struct bpf_link *link = filp->private_data;
2368
2369         bpf_link_put(link);
2370         return 0;
2371 }
2372
2373 #ifdef CONFIG_PROC_FS
2374 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2375 #define BPF_MAP_TYPE(_id, _ops)
2376 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2377 static const char *bpf_link_type_strs[] = {
2378         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2379 #include <linux/bpf_types.h>
2380 };
2381 #undef BPF_PROG_TYPE
2382 #undef BPF_MAP_TYPE
2383 #undef BPF_LINK_TYPE
2384
2385 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2386 {
2387         const struct bpf_link *link = filp->private_data;
2388         const struct bpf_prog *prog = link->prog;
2389         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2390
2391         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2392         seq_printf(m,
2393                    "link_type:\t%s\n"
2394                    "link_id:\t%u\n"
2395                    "prog_tag:\t%s\n"
2396                    "prog_id:\t%u\n",
2397                    bpf_link_type_strs[link->type],
2398                    link->id,
2399                    prog_tag,
2400                    prog->aux->id);
2401         if (link->ops->show_fdinfo)
2402                 link->ops->show_fdinfo(link, m);
2403 }
2404 #endif
2405
2406 static const struct file_operations bpf_link_fops = {
2407 #ifdef CONFIG_PROC_FS
2408         .show_fdinfo    = bpf_link_show_fdinfo,
2409 #endif
2410         .release        = bpf_link_release,
2411         .read           = bpf_dummy_read,
2412         .write          = bpf_dummy_write,
2413 };
2414
2415 static int bpf_link_alloc_id(struct bpf_link *link)
2416 {
2417         int id;
2418
2419         idr_preload(GFP_KERNEL);
2420         spin_lock_bh(&link_idr_lock);
2421         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2422         spin_unlock_bh(&link_idr_lock);
2423         idr_preload_end();
2424
2425         return id;
2426 }
2427
2428 /* Prepare bpf_link to be exposed to user-space by allocating an anon_inode
2429  * file, reserving an unused FD and allocating an ID from link_idr. This is to
2430  * be paired with bpf_link_settle() to install the FD and ID and expose the
2431  * bpf_link to user-space, if the bpf_link is successfully attached. If not,
2432  * bpf_link and the pre-allocated resources are freed with a bpf_link_cleanup()
2433  * call. All the transient state is passed around in struct bpf_link_primer.
2434  * This is the preferred way to create and initialize a bpf_link, especially
2435  * when there are complicated and expensive operations in between creating the
2436  * bpf_link itself and attaching it to the BPF hook. By using bpf_link_prime()
2437  * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2438  * expensive (and potentially failing) roll-back operations in the rare case
2439  * that the file, FD, or ID can't be allocated.
2440  */
2441 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2442 {
2443         struct file *file;
2444         int fd, id;
2445
2446         fd = get_unused_fd_flags(O_CLOEXEC);
2447         if (fd < 0)
2448                 return fd;
2449
2451         id = bpf_link_alloc_id(link);
2452         if (id < 0) {
2453                 put_unused_fd(fd);
2454                 return id;
2455         }
2456
2457         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2458         if (IS_ERR(file)) {
2459                 bpf_link_free_id(id);
2460                 put_unused_fd(fd);
2461                 return PTR_ERR(file);
2462         }
2463
2464         primer->link = link;
2465         primer->file = file;
2466         primer->fd = fd;
2467         primer->id = id;
2468         return 0;
2469 }
2470
2471 int bpf_link_settle(struct bpf_link_primer *primer)
2472 {
2473         /* make bpf_link fetchable by ID */
2474         spin_lock_bh(&link_idr_lock);
2475         primer->link->id = primer->id;
2476         spin_unlock_bh(&link_idr_lock);
2477         /* make bpf_link fetchable by FD */
2478         fd_install(primer->fd, primer->file);
2479         /* pass through installed FD */
2480         return primer->fd;
2481 }
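
/* A condensed sketch of the pattern the attach paths below follow;
 * attach_to_hook(), BPF_LINK_TYPE_FOO and foo_link_lops are placeholders
 * for whatever hook-specific operation and ops a given link type uses:
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);		// not exposed yet, plain kfree is fine
 *		return err;
 *	}
 *	err = attach_to_hook(link);	// the expensive, failure-prone step
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// link freed via deferred fput
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// publish FD and ID
 */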
2482
2483 int bpf_link_new_fd(struct bpf_link *link)
2484 {
2485         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2486 }
2487
2488 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2489 {
2490         struct fd f = fdget(ufd);
2491         struct bpf_link *link;
2492
2493         if (!f.file)
2494                 return ERR_PTR(-EBADF);
2495         if (f.file->f_op != &bpf_link_fops) {
2496                 fdput(f);
2497                 return ERR_PTR(-EINVAL);
2498         }
2499
2500         link = f.file->private_data;
2501         bpf_link_inc(link);
2502         fdput(f);
2503
2504         return link;
2505 }
2506
2507 struct bpf_tracing_link {
2508         struct bpf_link link;
2509         enum bpf_attach_type attach_type;
2510         struct bpf_trampoline *trampoline;
2511         struct bpf_prog *tgt_prog;
2512 };
2513
2514 static void bpf_tracing_link_release(struct bpf_link *link)
2515 {
2516         struct bpf_tracing_link *tr_link =
2517                 container_of(link, struct bpf_tracing_link, link);
2518
2519         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2520                                                 tr_link->trampoline));
2521
2522         bpf_trampoline_put(tr_link->trampoline);
2523
2524         /* tgt_prog is NULL if target is a kernel function */
2525         if (tr_link->tgt_prog)
2526                 bpf_prog_put(tr_link->tgt_prog);
2527 }
2528
2529 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2530 {
2531         struct bpf_tracing_link *tr_link =
2532                 container_of(link, struct bpf_tracing_link, link);
2533
2534         kfree(tr_link);
2535 }
2536
2537 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2538                                          struct seq_file *seq)
2539 {
2540         struct bpf_tracing_link *tr_link =
2541                 container_of(link, struct bpf_tracing_link, link);
2542
2543         seq_printf(seq,
2544                    "attach_type:\t%d\n",
2545                    tr_link->attach_type);
2546 }
2547
2548 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2549                                            struct bpf_link_info *info)
2550 {
2551         struct bpf_tracing_link *tr_link =
2552                 container_of(link, struct bpf_tracing_link, link);
2553
2554         info->tracing.attach_type = tr_link->attach_type;
2555         bpf_trampoline_unpack_key(tr_link->trampoline->key,
2556                                   &info->tracing.target_obj_id,
2557                                   &info->tracing.target_btf_id);
2558
2559         return 0;
2560 }
2561
2562 static const struct bpf_link_ops bpf_tracing_link_lops = {
2563         .release = bpf_tracing_link_release,
2564         .dealloc = bpf_tracing_link_dealloc,
2565         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2566         .fill_link_info = bpf_tracing_link_fill_link_info,
2567 };
2568
2569 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2570                                    int tgt_prog_fd,
2571                                    u32 btf_id)
2572 {
2573         struct bpf_link_primer link_primer;
2574         struct bpf_prog *tgt_prog = NULL;
2575         struct bpf_trampoline *tr = NULL;
2576         struct bpf_tracing_link *link;
2577         u64 key = 0;
2578         int err;
2579
2580         switch (prog->type) {
2581         case BPF_PROG_TYPE_TRACING:
2582                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2583                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2584                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2585                         err = -EINVAL;
2586                         goto out_put_prog;
2587                 }
2588                 break;
2589         case BPF_PROG_TYPE_EXT:
2590                 if (prog->expected_attach_type != 0) {
2591                         err = -EINVAL;
2592                         goto out_put_prog;
2593                 }
2594                 break;
2595         case BPF_PROG_TYPE_LSM:
2596                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2597                         err = -EINVAL;
2598                         goto out_put_prog;
2599                 }
2600                 break;
2601         default:
2602                 err = -EINVAL;
2603                 goto out_put_prog;
2604         }
2605
2606         if (!!tgt_prog_fd != !!btf_id) {
2607                 err = -EINVAL;
2608                 goto out_put_prog;
2609         }
2610
2611         if (tgt_prog_fd) {
2612                 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2613                 if (prog->type != BPF_PROG_TYPE_EXT) {
2614                         err = -EINVAL;
2615                         goto out_put_prog;
2616                 }
2617
2618                 tgt_prog = bpf_prog_get(tgt_prog_fd);
2619                 if (IS_ERR(tgt_prog)) {
2620                         err = PTR_ERR(tgt_prog);
2621                         tgt_prog = NULL;
2622                         goto out_put_prog;
2623                 }
2624
2625                 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2626         }
2627
2628         link = kzalloc(sizeof(*link), GFP_USER);
2629         if (!link) {
2630                 err = -ENOMEM;
2631                 goto out_put_prog;
2632         }
2633         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2634                       &bpf_tracing_link_lops, prog);
2635         link->attach_type = prog->expected_attach_type;
2636
2637         mutex_lock(&prog->aux->dst_mutex);
2638
2639         /* There are a few possible cases here:
2640          *
2641          * - if prog->aux->dst_trampoline is set, the program was just loaded
2642          *   and not yet attached to anything, so we can use the values stored
2643          *   in prog->aux
2644          *
2645          * - if prog->aux->dst_trampoline is NULL, the program has already been
2646          *   attached to a target and its initial target was cleared (below)
2647          *
2648          * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2649          *   target_btf_id using the link_create API.
2650          *
2651          * - if tgt_prog == NULL when this function was called using the old
2652  * - if tgt_prog == NULL, this function was called using the old
2653  *   raw_tracepoint_open API, and we need a target from prog->aux
2654          * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
2655          *   was detached and is going for re-attachment.
2656          */
2657         if (!prog->aux->dst_trampoline && !tgt_prog) {
2658                 /*
2659                  * Allow re-attach for TRACING and LSM programs. If it's
2660                  * currently linked, bpf_trampoline_link_prog will fail.
2661                  * EXT programs need to specify tgt_prog_fd, so they
2662                  * re-attach in a separate code path.
2663                  */
2664                 if (prog->type != BPF_PROG_TYPE_TRACING &&
2665                     prog->type != BPF_PROG_TYPE_LSM) {
2666                         err = -EINVAL;
2667                         goto out_unlock;
2668                 }
2669                 btf_id = prog->aux->attach_btf_id;
2670                 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
2671         }
2672
2673         if (!prog->aux->dst_trampoline ||
2674             (key && key != prog->aux->dst_trampoline->key)) {
2675                 /* If there is no saved target, or the specified target is
2676                  * different from the destination specified at load time, we
2677                  * need a new trampoline and a check for compatibility
2678                  */
2679                 struct bpf_attach_target_info tgt_info = {};
2680
2681                 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2682                                               &tgt_info);
2683                 if (err)
2684                         goto out_unlock;
2685
2686                 tr = bpf_trampoline_get(key, &tgt_info);
2687                 if (!tr) {
2688                         err = -ENOMEM;
2689                         goto out_unlock;
2690                 }
2691         } else {
2692                 /* The caller didn't specify a target, or the target was the
2693                  * same as the destination supplied during program load. This
2694                  * means we can reuse the trampoline and reference from program
2695                  * load time, and there is no need to allocate a new one. This
2696                  * can only happen once for any program, as the saved values in
2697                  * prog->aux are cleared below.
2698                  */
2699                 tr = prog->aux->dst_trampoline;
2700                 tgt_prog = prog->aux->dst_prog;
2701         }
2702
2703         err = bpf_link_prime(&link->link, &link_primer);
2704         if (err)
2705                 goto out_unlock;
2706
2707         err = bpf_trampoline_link_prog(prog, tr);
2708         if (err) {
2709                 bpf_link_cleanup(&link_primer);
2710                 link = NULL;
2711                 goto out_unlock;
2712         }
2713
2714         link->tgt_prog = tgt_prog;
2715         link->trampoline = tr;
2716
2717         /* Always clear the trampoline and target prog from prog->aux to make
2718          * sure the original attach destination is not kept alive after a
2719          * program is (re-)attached to another target.
2720          */
2721         if (prog->aux->dst_prog &&
2722             (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2723                 /* got extra prog ref from syscall, or attaching to different prog */
2724                 bpf_prog_put(prog->aux->dst_prog);
2725         if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2726                 /* we allocated a new trampoline, so free the old one */
2727                 bpf_trampoline_put(prog->aux->dst_trampoline);
2728
2729         prog->aux->dst_prog = NULL;
2730         prog->aux->dst_trampoline = NULL;
2731         mutex_unlock(&prog->aux->dst_mutex);
2732
2733         return bpf_link_settle(&link_primer);
2734 out_unlock:
2735         if (tr && tr != prog->aux->dst_trampoline)
2736                 bpf_trampoline_put(tr);
2737         mutex_unlock(&prog->aux->dst_mutex);
2738         kfree(link);
2739 out_put_prog:
2740         if (tgt_prog_fd && tgt_prog)
2741                 bpf_prog_put(tgt_prog);
2742         return err;
2743 }
2744
2745 struct bpf_raw_tp_link {
2746         struct bpf_link link;
2747         struct bpf_raw_event_map *btp;
2748 };
2749
2750 static void bpf_raw_tp_link_release(struct bpf_link *link)
2751 {
2752         struct bpf_raw_tp_link *raw_tp =
2753                 container_of(link, struct bpf_raw_tp_link, link);
2754
2755         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2756         bpf_put_raw_tracepoint(raw_tp->btp);
2757 }
2758
2759 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2760 {
2761         struct bpf_raw_tp_link *raw_tp =
2762                 container_of(link, struct bpf_raw_tp_link, link);
2763
2764         kfree(raw_tp);
2765 }
2766
2767 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2768                                         struct seq_file *seq)
2769 {
2770         struct bpf_raw_tp_link *raw_tp_link =
2771                 container_of(link, struct bpf_raw_tp_link, link);
2772
2773         seq_printf(seq,
2774                    "tp_name:\t%s\n",
2775                    raw_tp_link->btp->tp->name);
2776 }
2777
2778 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2779                                           struct bpf_link_info *info)
2780 {
2781         struct bpf_raw_tp_link *raw_tp_link =
2782                 container_of(link, struct bpf_raw_tp_link, link);
2783         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2784         const char *tp_name = raw_tp_link->btp->tp->name;
2785         u32 ulen = info->raw_tracepoint.tp_name_len;
2786         size_t tp_len = strlen(tp_name);
2787
2788         if (!ulen ^ !ubuf)
2789                 return -EINVAL;
2790
2791         info->raw_tracepoint.tp_name_len = tp_len + 1;
2792
2793         if (!ubuf)
2794                 return 0;
2795
2796         if (ulen >= tp_len + 1) {
2797                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2798                         return -EFAULT;
2799         } else {
2800                 char zero = '\0';
2801
2802                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2803                         return -EFAULT;
2804                 if (put_user(zero, ubuf + ulen - 1))
2805                         return -EFAULT;
2806                 return -ENOSPC;
2807         }
2808
2809         return 0;
2810 }
2811
2812 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2813         .release = bpf_raw_tp_link_release,
2814         .dealloc = bpf_raw_tp_link_dealloc,
2815         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2816         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2817 };
2818
2819 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2820
2821 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2822 {
2823         struct bpf_link_primer link_primer;
2824         struct bpf_raw_tp_link *link;
2825         struct bpf_raw_event_map *btp;
2826         struct bpf_prog *prog;
2827         const char *tp_name;
2828         char buf[128];
2829         int err;
2830
2831         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2832                 return -EINVAL;
2833
2834         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2835         if (IS_ERR(prog))
2836                 return PTR_ERR(prog);
2837
2838         switch (prog->type) {
2839         case BPF_PROG_TYPE_TRACING:
2840         case BPF_PROG_TYPE_EXT:
2841         case BPF_PROG_TYPE_LSM:
2842                 if (attr->raw_tracepoint.name) {
2843                         /* The attach point for this category of programs
2844                          * should be specified via btf_id during program load.
2845                          */
2846                         err = -EINVAL;
2847                         goto out_put_prog;
2848                 }
2849                 if (prog->type == BPF_PROG_TYPE_TRACING &&
2850                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2851                         tp_name = prog->aux->attach_func_name;
2852                         break;
2853                 }
2854                 err = bpf_tracing_prog_attach(prog, 0, 0);
2855                 if (err >= 0)
2856                         return err;
2857                 goto out_put_prog;
2858         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2859         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2860                 if (strncpy_from_user(buf,
2861                                       u64_to_user_ptr(attr->raw_tracepoint.name),
2862                                       sizeof(buf) - 1) < 0) {
2863                         err = -EFAULT;
2864                         goto out_put_prog;
2865                 }
2866                 buf[sizeof(buf) - 1] = 0;
2867                 tp_name = buf;
2868                 break;
2869         default:
2870                 err = -EINVAL;
2871                 goto out_put_prog;
2872         }
2873
2874         btp = bpf_get_raw_tracepoint(tp_name);
2875         if (!btp) {
2876                 err = -ENOENT;
2877                 goto out_put_prog;
2878         }
2879
2880         link = kzalloc(sizeof(*link), GFP_USER);
2881         if (!link) {
2882                 err = -ENOMEM;
2883                 goto out_put_btp;
2884         }
2885         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2886                       &bpf_raw_tp_link_lops, prog);
2887         link->btp = btp;
2888
2889         err = bpf_link_prime(&link->link, &link_primer);
2890         if (err) {
2891                 kfree(link);
2892                 goto out_put_btp;
2893         }
2894
2895         err = bpf_probe_register(link->btp, prog);
2896         if (err) {
2897                 bpf_link_cleanup(&link_primer);
2898                 goto out_put_btp;
2899         }
2900
2901         return bpf_link_settle(&link_primer);
2902
2903 out_put_btp:
2904         bpf_put_raw_tracepoint(btp);
2905 out_put_prog:
2906         bpf_prog_put(prog);
2907         return err;
2908 }
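
/* A user-space sketch of BPF_RAW_TRACEPOINT_OPEN (illustrative; assumes a
 * loaded BPF_PROG_TYPE_RAW_TRACEPOINT program fd and the ptr_to_u64()
 * helper assumed earlier):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name    = ptr_to_u64("sched_switch");
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *
 *	int link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			      &attr, sizeof(attr));
 *
 * Closing link_fd drops the last link reference and detaches the program
 * through bpf_raw_tp_link_release().
 */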
2909
2910 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2911                                              enum bpf_attach_type attach_type)
2912 {
2913         switch (prog->type) {
2914         case BPF_PROG_TYPE_CGROUP_SOCK:
2915         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2916         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2917         case BPF_PROG_TYPE_SK_LOOKUP:
2918                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2919         case BPF_PROG_TYPE_CGROUP_SKB:
2920                 if (!capable(CAP_NET_ADMIN))
2921                         /* cg-skb progs can be loaded by an unprivileged
2922                          * user, so check permissions at attach time.
2923                          */
2924                         return -EPERM;
2925                 return prog->enforce_expected_attach_type &&
2926                         prog->expected_attach_type != attach_type ?
2927                         -EINVAL : 0;
2928         default:
2929                 return 0;
2930         }
2931 }
2932
2933 static enum bpf_prog_type
2934 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2935 {
2936         switch (attach_type) {
2937         case BPF_CGROUP_INET_INGRESS:
2938         case BPF_CGROUP_INET_EGRESS:
2939                 return BPF_PROG_TYPE_CGROUP_SKB;
2940         case BPF_CGROUP_INET_SOCK_CREATE:
2941         case BPF_CGROUP_INET_SOCK_RELEASE:
2942         case BPF_CGROUP_INET4_POST_BIND:
2943         case BPF_CGROUP_INET6_POST_BIND:
2944                 return BPF_PROG_TYPE_CGROUP_SOCK;
2945         case BPF_CGROUP_INET4_BIND:
2946         case BPF_CGROUP_INET6_BIND:
2947         case BPF_CGROUP_INET4_CONNECT:
2948         case BPF_CGROUP_INET6_CONNECT:
2949         case BPF_CGROUP_INET4_GETPEERNAME:
2950         case BPF_CGROUP_INET6_GETPEERNAME:
2951         case BPF_CGROUP_INET4_GETSOCKNAME:
2952         case BPF_CGROUP_INET6_GETSOCKNAME:
2953         case BPF_CGROUP_UDP4_SENDMSG:
2954         case BPF_CGROUP_UDP6_SENDMSG:
2955         case BPF_CGROUP_UDP4_RECVMSG:
2956         case BPF_CGROUP_UDP6_RECVMSG:
2957                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2958         case BPF_CGROUP_SOCK_OPS:
2959                 return BPF_PROG_TYPE_SOCK_OPS;
2960         case BPF_CGROUP_DEVICE:
2961                 return BPF_PROG_TYPE_CGROUP_DEVICE;
2962         case BPF_SK_MSG_VERDICT:
2963                 return BPF_PROG_TYPE_SK_MSG;
2964         case BPF_SK_SKB_STREAM_PARSER:
2965         case BPF_SK_SKB_STREAM_VERDICT:
2966         case BPF_SK_SKB_VERDICT:
2967                 return BPF_PROG_TYPE_SK_SKB;
2968         case BPF_LIRC_MODE2:
2969                 return BPF_PROG_TYPE_LIRC_MODE2;
2970         case BPF_FLOW_DISSECTOR:
2971                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
2972         case BPF_CGROUP_SYSCTL:
2973                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
2974         case BPF_CGROUP_GETSOCKOPT:
2975         case BPF_CGROUP_SETSOCKOPT:
2976                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2977         case BPF_TRACE_ITER:
2978                 return BPF_PROG_TYPE_TRACING;
2979         case BPF_SK_LOOKUP:
2980                 return BPF_PROG_TYPE_SK_LOOKUP;
2981         case BPF_XDP:
2982                 return BPF_PROG_TYPE_XDP;
2983         default:
2984                 return BPF_PROG_TYPE_UNSPEC;
2985         }
2986 }
2987
2988 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2989
2990 #define BPF_F_ATTACH_MASK \
2991         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2992
2993 static int bpf_prog_attach(const union bpf_attr *attr)
2994 {
2995         enum bpf_prog_type ptype;
2996         struct bpf_prog *prog;
2997         int ret;
2998
2999         if (CHECK_ATTR(BPF_PROG_ATTACH))
3000                 return -EINVAL;
3001
3002         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3003                 return -EINVAL;
3004
3005         ptype = attach_type_to_prog_type(attr->attach_type);
3006         if (ptype == BPF_PROG_TYPE_UNSPEC)
3007                 return -EINVAL;
3008
3009         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3010         if (IS_ERR(prog))
3011                 return PTR_ERR(prog);
3012
3013         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3014                 bpf_prog_put(prog);
3015                 return -EINVAL;
3016         }
3017
3018         switch (ptype) {
3019         case BPF_PROG_TYPE_SK_SKB:
3020         case BPF_PROG_TYPE_SK_MSG:
3021                 ret = sock_map_get_from_fd(attr, prog);
3022                 break;
3023         case BPF_PROG_TYPE_LIRC_MODE2:
3024                 ret = lirc_prog_attach(attr, prog);
3025                 break;
3026         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3027                 ret = netns_bpf_prog_attach(attr, prog);
3028                 break;
3029         case BPF_PROG_TYPE_CGROUP_DEVICE:
3030         case BPF_PROG_TYPE_CGROUP_SKB:
3031         case BPF_PROG_TYPE_CGROUP_SOCK:
3032         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3033         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3034         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3035         case BPF_PROG_TYPE_SOCK_OPS:
3036                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3037                 break;
3038         default:
3039                 ret = -EINVAL;
3040         }
3041
3042         if (ret)
3043                 bpf_prog_put(prog);
3044         return ret;
3045 }
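
/* A user-space sketch of attaching a cgroup program (illustrative; assumes
 * cgroup_fd was opened from a cgroup2 directory and prog_fd matches the
 * attach type):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_ATTACH");
 */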
3046
3047 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3048
3049 static int bpf_prog_detach(const union bpf_attr *attr)
3050 {
3051         enum bpf_prog_type ptype;
3052
3053         if (CHECK_ATTR(BPF_PROG_DETACH))
3054                 return -EINVAL;
3055
3056         ptype = attach_type_to_prog_type(attr->attach_type);
3057
3058         switch (ptype) {
3059         case BPF_PROG_TYPE_SK_MSG:
3060         case BPF_PROG_TYPE_SK_SKB:
3061                 return sock_map_prog_detach(attr, ptype);
3062         case BPF_PROG_TYPE_LIRC_MODE2:
3063                 return lirc_prog_detach(attr);
3064         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3065                 return netns_bpf_prog_detach(attr, ptype);
3066         case BPF_PROG_TYPE_CGROUP_DEVICE:
3067         case BPF_PROG_TYPE_CGROUP_SKB:
3068         case BPF_PROG_TYPE_CGROUP_SOCK:
3069         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3070         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3071         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3072         case BPF_PROG_TYPE_SOCK_OPS:
3073                 return cgroup_bpf_prog_detach(attr, ptype);
3074         default:
3075                 return -EINVAL;
3076         }
3077 }
3078
3079 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3080
3081 static int bpf_prog_query(const union bpf_attr *attr,
3082                           union bpf_attr __user *uattr)
3083 {
3084         if (!capable(CAP_NET_ADMIN))
3085                 return -EPERM;
3086         if (CHECK_ATTR(BPF_PROG_QUERY))
3087                 return -EINVAL;
3088         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3089                 return -EINVAL;
3090
3091         switch (attr->query.attach_type) {
3092         case BPF_CGROUP_INET_INGRESS:
3093         case BPF_CGROUP_INET_EGRESS:
3094         case BPF_CGROUP_INET_SOCK_CREATE:
3095         case BPF_CGROUP_INET_SOCK_RELEASE:
3096         case BPF_CGROUP_INET4_BIND:
3097         case BPF_CGROUP_INET6_BIND:
3098         case BPF_CGROUP_INET4_POST_BIND:
3099         case BPF_CGROUP_INET6_POST_BIND:
3100         case BPF_CGROUP_INET4_CONNECT:
3101         case BPF_CGROUP_INET6_CONNECT:
3102         case BPF_CGROUP_INET4_GETPEERNAME:
3103         case BPF_CGROUP_INET6_GETPEERNAME:
3104         case BPF_CGROUP_INET4_GETSOCKNAME:
3105         case BPF_CGROUP_INET6_GETSOCKNAME:
3106         case BPF_CGROUP_UDP4_SENDMSG:
3107         case BPF_CGROUP_UDP6_SENDMSG:
3108         case BPF_CGROUP_UDP4_RECVMSG:
3109         case BPF_CGROUP_UDP6_RECVMSG:
3110         case BPF_CGROUP_SOCK_OPS:
3111         case BPF_CGROUP_DEVICE:
3112         case BPF_CGROUP_SYSCTL:
3113         case BPF_CGROUP_GETSOCKOPT:
3114         case BPF_CGROUP_SETSOCKOPT:
3115                 return cgroup_bpf_prog_query(attr, uattr);
3116         case BPF_LIRC_MODE2:
3117                 return lirc_prog_query(attr, uattr);
3118         case BPF_FLOW_DISSECTOR:
3119         case BPF_SK_LOOKUP:
3120                 return netns_bpf_prog_query(attr, uattr);
3121         default:
3122                 return -EINVAL;
3123         }
3124 }
3125
3126 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3127
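/*
 * BPF_PROG_TEST_RUN executes a program against caller-supplied data
 * and/or context buffers.  The actual run is delegated to the program
 * type's ->test_run() callback; types without one return -ENOTSUPP.
 */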
3128 static int bpf_prog_test_run(const union bpf_attr *attr,
3129                              union bpf_attr __user *uattr)
3130 {
3131         struct bpf_prog *prog;
3132         int ret = -ENOTSUPP;
3133
3134         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3135                 return -EINVAL;
3136
3137         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3138             (!attr->test.ctx_size_in && attr->test.ctx_in))
3139                 return -EINVAL;
3140
3141         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3142             (!attr->test.ctx_size_out && attr->test.ctx_out))
3143                 return -EINVAL;
3144
3145         prog = bpf_prog_get(attr->test.prog_fd);
3146         if (IS_ERR(prog))
3147                 return PTR_ERR(prog);
3148
3149         if (prog->aux->ops->test_run)
3150                 ret = prog->aux->ops->test_run(prog, attr, uattr);
3151
3152         bpf_prog_put(prog);
3153         return ret;
3154 }
3155
3156 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3157
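/*
 * Common helper behind BPF_{PROG,MAP,BTF,LINK}_GET_NEXT_ID: returns the
 * smallest ID strictly greater than attr->start_id, or -ENOENT once the
 * IDR is exhausted.  A minimal user-space sketch of walking all program
 * IDs (sys_bpf() is an assumed wrapper around syscall(__NR_bpf, ...)
 * and visit() stands in for the caller's per-ID work):
 *
 *	union bpf_attr attr = { .start_id = 0 };
 *
 *	while (!sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		visit(attr.next_id);
 *		attr.start_id = attr.next_id;
 *	}
 */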
3158 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3159                                union bpf_attr __user *uattr,
3160                                struct idr *idr,
3161                                spinlock_t *lock)
3162 {
3163         u32 next_id = attr->start_id;
3164         int err = 0;
3165
3166         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3167                 return -EINVAL;
3168
3169         if (!capable(CAP_SYS_ADMIN))
3170                 return -EPERM;
3171
3172         next_id++;
3173         spin_lock_bh(lock);
3174         if (!idr_get_next(idr, &next_id))
3175                 err = -ENOENT;
3176         spin_unlock_bh(lock);
3177
3178         if (!err)
3179                 err = put_user(next_id, &uattr->next_id);
3180
3181         return err;
3182 }
3183
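/*
 * The two *_curr_or_next() helpers below back the BPF object iterators:
 * they return the object whose ID is *id, or the one with the next
 * higher ID, with a reference taken.  Objects whose refcount already
 * dropped to zero are skipped by bumping *id and retrying.
 */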
3184 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3185 {
3186         struct bpf_map *map;
3187
3188         spin_lock_bh(&map_idr_lock);
3189 again:
3190         map = idr_get_next(&map_idr, id);
3191         if (map) {
3192                 map = __bpf_map_inc_not_zero(map, false);
3193                 if (IS_ERR(map)) {
3194                         (*id)++;
3195                         goto again;
3196                 }
3197         }
3198         spin_unlock_bh(&map_idr_lock);
3199
3200         return map;
3201 }
3202
3203 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3204 {
3205         struct bpf_prog *prog;
3206
3207         spin_lock_bh(&prog_idr_lock);
3208 again:
3209         prog = idr_get_next(&prog_idr, id);
3210         if (prog) {
3211                 prog = bpf_prog_inc_not_zero(prog);
3212                 if (IS_ERR(prog)) {
3213                         (*id)++;
3214                         goto again;
3215                 }
3216         }
3217         spin_unlock_bh(&prog_idr_lock);
3218
3219         return prog;
3220 }
3221
3222 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3223
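/* Look up a program by ID and take a reference; ID 0 never matches. */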
3224 struct bpf_prog *bpf_prog_by_id(u32 id)
3225 {
3226         struct bpf_prog *prog;
3227
3228         if (!id)
3229                 return ERR_PTR(-ENOENT);
3230
3231         spin_lock_bh(&prog_idr_lock);
3232         prog = idr_find(&prog_idr, id);
3233         if (prog)
3234                 prog = bpf_prog_inc_not_zero(prog);
3235         else
3236                 prog = ERR_PTR(-ENOENT);
3237         spin_unlock_bh(&prog_idr_lock);
3238         return prog;
3239 }
3240
3241 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3242 {
3243         struct bpf_prog *prog;
3244         u32 id = attr->prog_id;
3245         int fd;
3246
3247         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3248                 return -EINVAL;
3249
3250         if (!capable(CAP_SYS_ADMIN))
3251                 return -EPERM;
3252
3253         prog = bpf_prog_by_id(id);
3254         if (IS_ERR(prog))
3255                 return PTR_ERR(prog);
3256
3257         fd = bpf_prog_new_fd(prog);
3258         if (fd < 0)
3259                 bpf_prog_put(prog);
3260
3261         return fd;
3262 }
3263
3264 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3265
3266 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3267 {
3268         struct bpf_map *map;
3269         u32 id = attr->map_id;
3270         int f_flags;
3271         int fd;
3272
3273         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3274             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3275                 return -EINVAL;
3276
3277         if (!capable(CAP_SYS_ADMIN))
3278                 return -EPERM;
3279
3280         f_flags = bpf_get_file_flag(attr->open_flags);
3281         if (f_flags < 0)
3282                 return f_flags;
3283
3284         spin_lock_bh(&map_idr_lock);
3285         map = idr_find(&map_idr, id);
3286         if (map)
3287                 map = __bpf_map_inc_not_zero(map, true);
3288         else
3289                 map = ERR_PTR(-ENOENT);
3290         spin_unlock_bh(&map_idr_lock);
3291
3292         if (IS_ERR(map))
3293                 return PTR_ERR(map);
3294
3295         fd = bpf_map_new_fd(map, f_flags);
3296         if (fd < 0)
3297                 bpf_map_put_with_uref(map);
3298
3299         return fd;
3300 }
3301
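/*
 * Reverse-map a map address baked into a BPF_LD_IMM64 insn back to the
 * map it came from, so that instruction dumps can show a stable map ID
 * (plus an offset for direct value accesses) instead of a raw kernel
 * pointer.
 */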
3302 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3303                                               unsigned long addr, u32 *off,
3304                                               u32 *type)
3305 {
3306         const struct bpf_map *map;
3307         int i;
3308
3309         mutex_lock(&prog->aux->used_maps_mutex);
3310         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3311                 map = prog->aux->used_maps[i];
3312                 if (map == (void *)addr) {
3313                         *type = BPF_PSEUDO_MAP_FD;
3314                         goto out;
3315                 }
3316                 if (!map->ops->map_direct_value_meta)
3317                         continue;
3318                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3319                         *type = BPF_PSEUDO_MAP_VALUE;
3320                         goto out;
3321                 }
3322         }
3323         map = NULL;
3324
3325 out:
3326         mutex_unlock(&prog->aux->used_maps_mutex);
3327         return map;
3328 }
3329
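/*
 * Make a copy of the insn array that is safe to hand to user space:
 * undo the verifier's tail-call and BPF_PROBE_MEM rewrites, zero out
 * call imms unless the caller is allowed to see raw addresses, and
 * turn embedded map pointers back into map IDs and offsets.
 */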
3330 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3331                                               const struct cred *f_cred)
3332 {
3333         const struct bpf_map *map;
3334         struct bpf_insn *insns;
3335         u32 off, type;
3336         u64 imm;
3337         u8 code;
3338         int i;
3339
3340         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3341                         GFP_USER);
3342         if (!insns)
3343                 return NULL;
3344
3345         for (i = 0; i < prog->len; i++) {
3346                 code = insns[i].code;
3347
3348                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3349                         insns[i].code = BPF_JMP | BPF_CALL;
3350                         insns[i].imm = BPF_FUNC_tail_call;
3351                         /* fall-through */
3352                 }
3353                 if (code == (BPF_JMP | BPF_CALL) ||
3354                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3355                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3356                                 insns[i].code = BPF_JMP | BPF_CALL;
3357                         if (!bpf_dump_raw_ok(f_cred))
3358                                 insns[i].imm = 0;
3359                         continue;
3360                 }
3361                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3362                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3363                         continue;
3364                 }
3365
3366                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3367                         continue;
3368
3369                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3370                 map = bpf_map_from_imm(prog, imm, &off, &type);
3371                 if (map) {
3372                         insns[i].src_reg = type;
3373                         insns[i].imm = map->id;
3374                         insns[i + 1].imm = off;
3375                         continue;
3376                 }
3377         }
3378
3379         return insns;
3380 }
3381
3382 static int set_info_rec_size(struct bpf_prog_info *info)
3383 {
3384         /*
3385          * Ensure info.*_rec_size is the same as the kernel's expected size,
3386          *
3387          * or
3388          *
3389          * only allow a zero *_rec_size if the matching *_cnt is also
3390          * zero.  In that case, the kernel writes the expected
3391          * _rec_size back into the info.
3392          */
3393
3394         if ((info->nr_func_info || info->func_info_rec_size) &&
3395             info->func_info_rec_size != sizeof(struct bpf_func_info))
3396                 return -EINVAL;
3397
3398         if ((info->nr_line_info || info->line_info_rec_size) &&
3399             info->line_info_rec_size != sizeof(struct bpf_line_info))
3400                 return -EINVAL;
3401
3402         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3403             info->jited_line_info_rec_size != sizeof(__u64))
3404                 return -EINVAL;
3405
3406         info->func_info_rec_size = sizeof(struct bpf_func_info);
3407         info->line_info_rec_size = sizeof(struct bpf_line_info);
3408         info->jited_line_info_rec_size = sizeof(__u64);
3409
3410         return 0;
3411 }
3412
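/*
 * Fill struct bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  Every variable
 * sized array follows the same pattern: user space passes in a buffer
 * and its capacity, the kernel writes back the true element count and
 * copies out at most the requested number of elements.  Callers without
 * bpf_capable() only get the basic metadata; all dump sizes are forced
 * to zero for them.
 */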
3413 static int bpf_prog_get_info_by_fd(struct file *file,
3414                                    struct bpf_prog *prog,
3415                                    const union bpf_attr *attr,
3416                                    union bpf_attr __user *uattr)
3417 {
3418         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3419         struct bpf_prog_info info;
3420         u32 info_len = attr->info.info_len;
3421         struct bpf_prog_stats stats;
3422         char __user *uinsns;
3423         u32 ulen;
3424         int err;
3425
3426         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3427         if (err)
3428                 return err;
3429         info_len = min_t(u32, sizeof(info), info_len);
3430
3431         memset(&info, 0, sizeof(info));
3432         if (copy_from_user(&info, uinfo, info_len))
3433                 return -EFAULT;
3434
3435         info.type = prog->type;
3436         info.id = prog->aux->id;
3437         info.load_time = prog->aux->load_time;
3438         info.created_by_uid = from_kuid_munged(current_user_ns(),
3439                                                prog->aux->user->uid);
3440         info.gpl_compatible = prog->gpl_compatible;
3441
3442         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3443         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3444
3445         mutex_lock(&prog->aux->used_maps_mutex);
3446         ulen = info.nr_map_ids;
3447         info.nr_map_ids = prog->aux->used_map_cnt;
3448         ulen = min_t(u32, info.nr_map_ids, ulen);
3449         if (ulen) {
3450                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3451                 u32 i;
3452
3453                 for (i = 0; i < ulen; i++)
3454                         if (put_user(prog->aux->used_maps[i]->id,
3455                                      &user_map_ids[i])) {
3456                                 mutex_unlock(&prog->aux->used_maps_mutex);
3457                                 return -EFAULT;
3458                         }
3459         }
3460         mutex_unlock(&prog->aux->used_maps_mutex);
3461
3462         err = set_info_rec_size(&info);
3463         if (err)
3464                 return err;
3465
3466         bpf_prog_get_stats(prog, &stats);
3467         info.run_time_ns = stats.nsecs;
3468         info.run_cnt = stats.cnt;
3469         info.recursion_misses = stats.misses;
3470
3471         if (!bpf_capable()) {
3472                 info.jited_prog_len = 0;
3473                 info.xlated_prog_len = 0;
3474                 info.nr_jited_ksyms = 0;
3475                 info.nr_jited_func_lens = 0;
3476                 info.nr_func_info = 0;
3477                 info.nr_line_info = 0;
3478                 info.nr_jited_line_info = 0;
3479                 goto done;
3480         }
3481
3482         ulen = info.xlated_prog_len;
3483         info.xlated_prog_len = bpf_prog_insn_size(prog);
3484         if (info.xlated_prog_len && ulen) {
3485                 struct bpf_insn *insns_sanitized;
3486                 bool fault;
3487
3488                 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3489                         info.xlated_prog_insns = 0;
3490                         goto done;
3491                 }
3492                 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3493                 if (!insns_sanitized)
3494                         return -ENOMEM;
3495                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3496                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3497                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3498                 kfree(insns_sanitized);
3499                 if (fault)
3500                         return -EFAULT;
3501         }
3502
3503         if (bpf_prog_is_dev_bound(prog->aux)) {
3504                 err = bpf_prog_offload_info_fill(&info, prog);
3505                 if (err)
3506                         return err;
3507                 goto done;
3508         }
3509
3510         /* NOTE: the following code is skipped for offloaded programs;
3511          * bpf_prog_offload_info_fill() above fills the corresponding
3512          * fields for them.
3513          */
3514         ulen = info.jited_prog_len;
3515         if (prog->aux->func_cnt) {
3516                 u32 i;
3517
3518                 info.jited_prog_len = 0;
3519                 for (i = 0; i < prog->aux->func_cnt; i++)
3520                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3521         } else {
3522                 info.jited_prog_len = prog->jited_len;
3523         }
3524
3525         if (info.jited_prog_len && ulen) {
3526                 if (bpf_dump_raw_ok(file->f_cred)) {
3527                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3528                         ulen = min_t(u32, info.jited_prog_len, ulen);
3529
3530                         /* for multi-function programs, copy the JITed
3531                          * instructions for all the functions
3532                          */
3533                         if (prog->aux->func_cnt) {
3534                                 u32 len, free, i;
3535                                 u8 *img;
3536
3537                                 free = ulen;
3538                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3539                                         len = prog->aux->func[i]->jited_len;
3540                                         len = min_t(u32, len, free);
3541                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3542                                         if (copy_to_user(uinsns, img, len))
3543                                                 return -EFAULT;
3544                                         uinsns += len;
3545                                         free -= len;
3546                                         if (!free)
3547                                                 break;
3548                                 }
3549                         } else {
3550                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3551                                         return -EFAULT;
3552                         }
3553                 } else {
3554                         info.jited_prog_insns = 0;
3555                 }
3556         }
3557
3558         ulen = info.nr_jited_ksyms;
3559         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3560         if (ulen) {
3561                 if (bpf_dump_raw_ok(file->f_cred)) {
3562                         unsigned long ksym_addr;
3563                         u64 __user *user_ksyms;
3564                         u32 i;
3565
3566                         /* copy the address of the kernel symbol
3567                          * corresponding to each function
3568                          */
3569                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3570                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3571                         if (prog->aux->func_cnt) {
3572                                 for (i = 0; i < ulen; i++) {
3573                                         ksym_addr = (unsigned long)
3574                                                 prog->aux->func[i]->bpf_func;
3575                                         if (put_user((u64) ksym_addr,
3576                                                      &user_ksyms[i]))
3577                                                 return -EFAULT;
3578                                 }
3579                         } else {
3580                                 ksym_addr = (unsigned long) prog->bpf_func;
3581                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3582                                         return -EFAULT;
3583                         }
3584                 } else {
3585                         info.jited_ksyms = 0;
3586                 }
3587         }
3588
3589         ulen = info.nr_jited_func_lens;
3590         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3591         if (ulen) {
3592                 if (bpf_dump_raw_ok(file->f_cred)) {
3593                         u32 __user *user_lens;
3594                         u32 func_len, i;
3595
3596                         /* copy the JITed image lengths for each function */
3597                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3598                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3599                         if (prog->aux->func_cnt) {
3600                                 for (i = 0; i < ulen; i++) {
3601                                         func_len =
3602                                                 prog->aux->func[i]->jited_len;
3603                                         if (put_user(func_len, &user_lens[i]))
3604                                                 return -EFAULT;
3605                                 }
3606                         } else {
3607                                 func_len = prog->jited_len;
3608                                 if (put_user(func_len, &user_lens[0]))
3609                                         return -EFAULT;
3610                         }
3611                 } else {
3612                         info.jited_func_lens = 0;
3613                 }
3614         }
3615
3616         if (prog->aux->btf)
3617                 info.btf_id = btf_obj_id(prog->aux->btf);
3618
3619         ulen = info.nr_func_info;
3620         info.nr_func_info = prog->aux->func_info_cnt;
3621         if (info.nr_func_info && ulen) {
3622                 char __user *user_finfo;
3623
3624                 user_finfo = u64_to_user_ptr(info.func_info);
3625                 ulen = min_t(u32, info.nr_func_info, ulen);
3626                 if (copy_to_user(user_finfo, prog->aux->func_info,
3627                                  info.func_info_rec_size * ulen))
3628                         return -EFAULT;
3629         }
3630
3631         ulen = info.nr_line_info;
3632         info.nr_line_info = prog->aux->nr_linfo;
3633         if (info.nr_line_info && ulen) {
3634                 __u8 __user *user_linfo;
3635
3636                 user_linfo = u64_to_user_ptr(info.line_info);
3637                 ulen = min_t(u32, info.nr_line_info, ulen);
3638                 if (copy_to_user(user_linfo, prog->aux->linfo,
3639                                  info.line_info_rec_size * ulen))
3640                         return -EFAULT;
3641         }
3642
3643         ulen = info.nr_jited_line_info;
3644         if (prog->aux->jited_linfo)
3645                 info.nr_jited_line_info = prog->aux->nr_linfo;
3646         else
3647                 info.nr_jited_line_info = 0;
3648         if (info.nr_jited_line_info && ulen) {
3649                 if (bpf_dump_raw_ok(file->f_cred)) {
3650                         __u64 __user *user_linfo;
3651                         u32 i;
3652
3653                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3654                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3655                         for (i = 0; i < ulen; i++) {
3656                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3657                                              &user_linfo[i]))
3658                                         return -EFAULT;
3659                         }
3660                 } else {
3661                         info.jited_line_info = 0;
3662                 }
3663         }
3664
3665         ulen = info.nr_prog_tags;
3666         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3667         if (ulen) {
3668                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3669                 u32 i;
3670
3671                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3672                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3673                 if (prog->aux->func_cnt) {
3674                         for (i = 0; i < ulen; i++) {
3675                                 if (copy_to_user(user_prog_tags[i],
3676                                                  prog->aux->func[i]->tag,
3677                                                  BPF_TAG_SIZE))
3678                                         return -EFAULT;
3679                         }
3680                 } else {
3681                         if (copy_to_user(user_prog_tags[0],
3682                                          prog->tag, BPF_TAG_SIZE))
3683                                 return -EFAULT;
3684                 }
3685         }
3686
3687 done:
3688         if (copy_to_user(uinfo, &info, info_len) ||
3689             put_user(info_len, &uattr->info.info_len))
3690                 return -EFAULT;
3691
3692         return 0;
3693 }
3694
3695 static int bpf_map_get_info_by_fd(struct file *file,
3696                                   struct bpf_map *map,
3697                                   const union bpf_attr *attr,
3698                                   union bpf_attr __user *uattr)
3699 {
3700         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3701         struct bpf_map_info info;
3702         u32 info_len = attr->info.info_len;
3703         int err;
3704
3705         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3706         if (err)
3707                 return err;
3708         info_len = min_t(u32, sizeof(info), info_len);
3709
3710         memset(&info, 0, sizeof(info));
3711         info.type = map->map_type;
3712         info.id = map->id;
3713         info.key_size = map->key_size;
3714         info.value_size = map->value_size;
3715         info.max_entries = map->max_entries;
3716         info.map_flags = map->map_flags;
3717         memcpy(info.name, map->name, sizeof(map->name));
3718
3719         if (map->btf) {
3720                 info.btf_id = btf_obj_id(map->btf);
3721                 info.btf_key_type_id = map->btf_key_type_id;
3722                 info.btf_value_type_id = map->btf_value_type_id;
3723         }
3724         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3725
3726         if (bpf_map_is_dev_bound(map)) {
3727                 err = bpf_map_offload_info_fill(&info, map);
3728                 if (err)
3729                         return err;
3730         }
3731
3732         if (copy_to_user(uinfo, &info, info_len) ||
3733             put_user(info_len, &uattr->info.info_len))
3734                 return -EFAULT;
3735
3736         return 0;
3737 }
3738
3739 static int bpf_btf_get_info_by_fd(struct file *file,
3740                                   struct btf *btf,
3741                                   const union bpf_attr *attr,
3742                                   union bpf_attr __user *uattr)
3743 {
3744         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3745         u32 info_len = attr->info.info_len;
3746         int err;
3747
3748         err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3749         if (err)
3750                 return err;
3751
3752         return btf_get_info_by_fd(btf, attr, uattr);
3753 }
3754
3755 static int bpf_link_get_info_by_fd(struct file *file,
3756                                   struct bpf_link *link,
3757                                   const union bpf_attr *attr,
3758                                   union bpf_attr __user *uattr)
3759 {
3760         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3761         struct bpf_link_info info;
3762         u32 info_len = attr->info.info_len;
3763         int err;
3764
3765         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3766         if (err)
3767                 return err;
3768         info_len = min_t(u32, sizeof(info), info_len);
3769
3770         memset(&info, 0, sizeof(info));
3771         if (copy_from_user(&info, uinfo, info_len))
3772                 return -EFAULT;
3773
3774         info.type = link->type;
3775         info.id = link->id;
3776         info.prog_id = link->prog->aux->id;
3777
3778         if (link->ops->fill_link_info) {
3779                 err = link->ops->fill_link_info(link, &info);
3780                 if (err)
3781                         return err;
3782         }
3783
3784         if (copy_to_user(uinfo, &info, info_len) ||
3785             put_user(info_len, &uattr->info.info_len))
3786                 return -EFAULT;
3787
3788         return 0;
3789 }
3790
3792 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3793
3794 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3795                                   union bpf_attr __user *uattr)
3796 {
3797         int ufd = attr->info.bpf_fd;
3798         struct fd f;
3799         int err;
3800
3801         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3802                 return -EINVAL;
3803
3804         f = fdget(ufd);
3805         if (!f.file)
3806                 return -EBADFD;
3807
3808         if (f.file->f_op == &bpf_prog_fops)
3809                 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3810                                               uattr);
3811         else if (f.file->f_op == &bpf_map_fops)
3812                 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3813                                              uattr);
3814         else if (f.file->f_op == &btf_fops)
3815                 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3816         else if (f.file->f_op == &bpf_link_fops)
3817                 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3818                                               attr, uattr);
3819         else
3820                 err = -EINVAL;
3821
3822         fdput(f);
3823         return err;
3824 }
3825
3826 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3827
3828 static int bpf_btf_load(const union bpf_attr *attr)
3829 {
3830         if (CHECK_ATTR(BPF_BTF_LOAD))
3831                 return -EINVAL;
3832
3833         if (!bpf_capable())
3834                 return -EPERM;
3835
3836         return btf_new_fd(attr);
3837 }
3838
3839 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3840
3841 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3842 {
3843         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3844                 return -EINVAL;
3845
3846         if (!capable(CAP_SYS_ADMIN))
3847                 return -EPERM;
3848
3849         return btf_get_fd_by_id(attr->btf_id);
3850 }
3851
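/*
 * Copy one task_fd_query result to user space.  The name buffer is
 * NUL-terminated even when truncated, in which case -ENOSPC is
 * returned; the full string length is always written back to buf_len.
 */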
3852 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3853                                     union bpf_attr __user *uattr,
3854                                     u32 prog_id, u32 fd_type,
3855                                     const char *buf, u64 probe_offset,
3856                                     u64 probe_addr)
3857 {
3858         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3859         u32 len = buf ? strlen(buf) : 0, input_len;
3860         int err = 0;
3861
3862         if (put_user(len, &uattr->task_fd_query.buf_len))
3863                 return -EFAULT;
3864         input_len = attr->task_fd_query.buf_len;
3865         if (input_len && ubuf) {
3866                 if (!len) {
3867                         /* nothing to copy, just make ubuf NULL terminated */
3868                         /* nothing to copy, just NUL-terminate ubuf */
3869
3870                         if (put_user(zero, ubuf))
3871                                 return -EFAULT;
3872                 } else if (input_len >= len + 1) {
3873                         /* ubuf can hold the string including the NUL terminator */
3874                         if (copy_to_user(ubuf, buf, len + 1))
3875                                 return -EFAULT;
3876                 } else {
3877                         /* ubuf cannot hold the full string, do a
3878                          * partial copy that is still NUL-terminated.
3879                          */
3880                         char zero = '\0';
3881
3882                         err = -ENOSPC;
3883                         if (copy_to_user(ubuf, buf, input_len - 1))
3884                                 return -EFAULT;
3885                         if (put_user(zero, ubuf + input_len - 1))
3886                                 return -EFAULT;
3887                 }
3888         }
3889
3890         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3891             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3892             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3893             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3894                 return -EFAULT;
3895
3896         return err;
3897 }
3898
3899 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3900
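/*
 * BPF_TASK_FD_QUERY: given a pid and an fd in that task, report which
 * BPF program sits behind it, either a raw tracepoint link or a
 * perf-event-based probe (kprobe, uprobe or tracepoint).
 */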
3901 static int bpf_task_fd_query(const union bpf_attr *attr,
3902                              union bpf_attr __user *uattr)
3903 {
3904         pid_t pid = attr->task_fd_query.pid;
3905         u32 fd = attr->task_fd_query.fd;
3906         const struct perf_event *event;
3907         struct task_struct *task;
3908         struct file *file;
3909         int err;
3910
3911         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3912                 return -EINVAL;
3913
3914         if (!capable(CAP_SYS_ADMIN))
3915                 return -EPERM;
3916
3917         if (attr->task_fd_query.flags != 0)
3918                 return -EINVAL;
3919
3920         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3921         if (!task)
3922                 return -ENOENT;
3923
3924         err = 0;
3925         file = fget_task(task, fd);
3926         put_task_struct(task);
3927         if (!file)
3928                 return -EBADF;
3929
3930         if (file->f_op == &bpf_link_fops) {
3931                 struct bpf_link *link = file->private_data;
3932
3933                 if (link->ops == &bpf_raw_tp_link_lops) {
3934                         struct bpf_raw_tp_link *raw_tp =
3935                                 container_of(link, struct bpf_raw_tp_link, link);
3936                         struct bpf_raw_event_map *btp = raw_tp->btp;
3937
3938                         err = bpf_task_fd_query_copy(attr, uattr,
3939                                                      raw_tp->link.prog->aux->id,
3940                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
3941                                                      btp->tp->name, 0, 0);
3942                         goto put_file;
3943                 }
3944                 goto out_not_supp;
3945         }
3946
3947         event = perf_get_event(file);
3948         if (!IS_ERR(event)) {
3949                 u64 probe_offset, probe_addr;
3950                 u32 prog_id, fd_type;
3951                 const char *buf;
3952
3953                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3954                                               &buf, &probe_offset,
3955                                               &probe_addr);
3956                 if (!err)
3957                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3958                                                      fd_type, buf,
3959                                                      probe_offset,
3960                                                      probe_addr);
3961                 goto put_file;
3962         }
3963
3964 out_not_supp:
3965         err = -ENOTSUPP;
3966 put_file:
3967         fput(file);
3968         return err;
3969 }
3970
3971 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3972
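/*
 * All batched map commands share this entry point; BPF_DO_BATCH() bails
 * out with -ENOTSUPP when the map type does not implement the requested
 * batch operation.  Lookup needs read access, update/delete need write
 * access, and lookup-and-delete needs both.
 */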
3973 #define BPF_DO_BATCH(fn)                        \
3974         do {                                    \
3975                 if (!fn) {                      \
3976                         err = -ENOTSUPP;        \
3977                         goto err_put;           \
3978                 }                               \
3979                 err = fn(map, attr, uattr);     \
3980         } while (0)
3981
3982 static int bpf_map_do_batch(const union bpf_attr *attr,
3983                             union bpf_attr __user *uattr,
3984                             int cmd)
3985 {
3986         struct bpf_map *map;
3987         int err, ufd;
3988         struct fd f;
3989
3990         if (CHECK_ATTR(BPF_MAP_BATCH))
3991                 return -EINVAL;
3992
3993         ufd = attr->batch.map_fd;
3994         f = fdget(ufd);
3995         map = __bpf_map_get(f);
3996         if (IS_ERR(map))
3997                 return PTR_ERR(map);
3998
3999         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
4000              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
4001             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4002                 err = -EPERM;
4003                 goto err_put;
4004         }
4005
4006         if (cmd != BPF_MAP_LOOKUP_BATCH &&
4007             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4008                 err = -EPERM;
4009                 goto err_put;
4010         }
4011
4012         if (cmd == BPF_MAP_LOOKUP_BATCH)
4013                 BPF_DO_BATCH(map->ops->map_lookup_batch);
4014         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4015                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4016         else if (cmd == BPF_MAP_UPDATE_BATCH)
4017                 BPF_DO_BATCH(map->ops->map_update_batch);
4018         else
4019                 BPF_DO_BATCH(map->ops->map_delete_batch);
4020
4021 err_put:
4022         fdput(f);
4023         return err;
4024 }
4025
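/*
 * Link-based attach for tracing-style programs: BPF_TRACE_ITER targets
 * become iterator links, while freplace (BPF_PROG_TYPE_EXT) programs
 * are attached to the target function given by target_fd/target_btf_id.
 */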
4026 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4027 {
4028         if (attr->link_create.attach_type != prog->expected_attach_type)
4029                 return -EINVAL;
4030
4031         if (prog->expected_attach_type == BPF_TRACE_ITER)
4032                 return bpf_iter_link_attach(attr, prog);
4033         else if (prog->type == BPF_PROG_TYPE_EXT)
4034                 return bpf_tracing_prog_attach(prog,
4035                                                attr->link_create.target_fd,
4036                                                attr->link_create.target_btf_id);
4037         return -EINVAL;
4038 }
4039
4040 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
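/*
 * BPF_LINK_CREATE attaches a program through a bpf_link: an fd-backed
 * kernel object that holds the attachment, keeps the program alive and
 * detaches it automatically once the last reference is dropped.
 */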
4041 static int link_create(union bpf_attr *attr)
4042 {
4043         enum bpf_prog_type ptype;
4044         struct bpf_prog *prog;
4045         int ret;
4046
4047         if (CHECK_ATTR(BPF_LINK_CREATE))
4048                 return -EINVAL;
4049
4050         prog = bpf_prog_get(attr->link_create.prog_fd);
4051         if (IS_ERR(prog))
4052                 return PTR_ERR(prog);
4053
4054         ret = bpf_prog_attach_check_attach_type(prog,
4055                                                 attr->link_create.attach_type);
4056         if (ret)
4057                 goto out;
4058
4059         if (prog->type == BPF_PROG_TYPE_EXT) {
4060                 ret = tracing_bpf_link_attach(attr, prog);
4061                 goto out;
4062         }
4063
4064         ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4065         if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4066                 ret = -EINVAL;
4067                 goto out;
4068         }
4069
4070         switch (ptype) {
4071         case BPF_PROG_TYPE_CGROUP_SKB:
4072         case BPF_PROG_TYPE_CGROUP_SOCK:
4073         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4074         case BPF_PROG_TYPE_SOCK_OPS:
4075         case BPF_PROG_TYPE_CGROUP_DEVICE:
4076         case BPF_PROG_TYPE_CGROUP_SYSCTL:
4077         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4078                 ret = cgroup_bpf_link_attach(attr, prog);
4079                 break;
4080         case BPF_PROG_TYPE_TRACING:
4081                 ret = tracing_bpf_link_attach(attr, prog);
4082                 break;
4083         case BPF_PROG_TYPE_FLOW_DISSECTOR:
4084         case BPF_PROG_TYPE_SK_LOOKUP:
4085                 ret = netns_bpf_link_create(attr, prog);
4086                 break;
4087 #ifdef CONFIG_NET
4088         case BPF_PROG_TYPE_XDP:
4089                 ret = bpf_xdp_link_attach(attr, prog);
4090                 break;
4091 #endif
4092         default:
4093                 ret = -EINVAL;
4094         }
4095
4096 out:
4097         if (ret < 0)
4098                 bpf_prog_put(prog);
4099         return ret;
4100 }
4101
4102 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4103
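/*
 * BPF_LINK_UPDATE atomically swaps the program behind an existing link.
 * With BPF_F_REPLACE the caller also pins the expected old program, so
 * ops that honor it can fail the update if that program is no longer
 * the one attached.
 */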
4104 static int link_update(union bpf_attr *attr)
4105 {
4106         struct bpf_prog *old_prog = NULL, *new_prog;
4107         struct bpf_link *link;
4108         u32 flags;
4109         int ret;
4110
4111         if (CHECK_ATTR(BPF_LINK_UPDATE))
4112                 return -EINVAL;
4113
4114         flags = attr->link_update.flags;
4115         if (flags & ~BPF_F_REPLACE)
4116                 return -EINVAL;
4117
4118         link = bpf_link_get_from_fd(attr->link_update.link_fd);
4119         if (IS_ERR(link))
4120                 return PTR_ERR(link);
4121
4122         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4123         if (IS_ERR(new_prog)) {
4124                 ret = PTR_ERR(new_prog);
4125                 goto out_put_link;
4126         }
4127
4128         if (flags & BPF_F_REPLACE) {
4129                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4130                 if (IS_ERR(old_prog)) {
4131                         ret = PTR_ERR(old_prog);
4132                         old_prog = NULL;
4133                         goto out_put_progs;
4134                 }
4135         } else if (attr->link_update.old_prog_fd) {
4136                 ret = -EINVAL;
4137                 goto out_put_progs;
4138         }
4139
4140         if (link->ops->update_prog)
4141                 ret = link->ops->update_prog(link, new_prog, old_prog);
4142         else
4143                 ret = -EINVAL;
4144
4145 out_put_progs:
4146         if (old_prog)
4147                 bpf_prog_put(old_prog);
4148         if (ret)
4149                 bpf_prog_put(new_prog);
4150 out_put_link:
4151         bpf_link_put(link);
4152         return ret;
4153 }
4154
4155 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4156
4157 static int link_detach(union bpf_attr *attr)
4158 {
4159         struct bpf_link *link;
4160         int ret;
4161
4162         if (CHECK_ATTR(BPF_LINK_DETACH))
4163                 return -EINVAL;
4164
4165         link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4166         if (IS_ERR(link))
4167                 return PTR_ERR(link);
4168
4169         if (link->ops->detach)
4170                 ret = link->ops->detach(link);
4171         else
4172                 ret = -EOPNOTSUPP;
4173
4174         bpf_link_put(link);
4175         return ret;
4176 }
4177
4178 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4179 {
4180         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4181 }
4182
4183 struct bpf_link *bpf_link_by_id(u32 id)
4184 {
4185         struct bpf_link *link;
4186
4187         if (!id)
4188                 return ERR_PTR(-ENOENT);
4189
4190         spin_lock_bh(&link_idr_lock);
4191         /* before link is "settled", ID is 0, pretend it doesn't exist yet */
4192         link = idr_find(&link_idr, id);
4193         if (link) {
4194                 if (link->id)
4195                         link = bpf_link_inc_not_zero(link);
4196                 else
4197                         link = ERR_PTR(-EAGAIN);
4198         } else {
4199                 link = ERR_PTR(-ENOENT);
4200         }
4201         spin_unlock_bh(&link_idr_lock);
4202         return link;
4203 }
4204
4205 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4206
4207 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4208 {
4209         struct bpf_link *link;
4210         u32 id = attr->link_id;
4211         int fd;
4212
4213         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4214                 return -EINVAL;
4215
4216         if (!capable(CAP_SYS_ADMIN))
4217                 return -EPERM;
4218
4219         link = bpf_link_by_id(id);
4220         if (IS_ERR(link))
4221                 return PTR_ERR(link);
4222
4223         fd = bpf_link_new_fd(link);
4224         if (fd < 0)
4225                 bpf_link_put(link);
4226
4227         return fd;
4228 }
4229
4230 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4231
4232 static int bpf_stats_release(struct inode *inode, struct file *file)
4233 {
4234         mutex_lock(&bpf_stats_enabled_mutex);
4235         static_key_slow_dec(&bpf_stats_enabled_key.key);
4236         mutex_unlock(&bpf_stats_enabled_mutex);
4237         return 0;
4238 }
4239
4240 static const struct file_operations bpf_stats_fops = {
4241         .release = bpf_stats_release,
4242 };
4243
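/*
 * Run-time stats are gated by a static key whose count is tied to the
 * lifetime of anonymous "bpf-stats" fds: each successful
 * BPF_ENABLE_STATS increments the key, each fd release decrements it.
 */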
4244 static int bpf_enable_runtime_stats(void)
4245 {
4246         int fd;
4247
4248         mutex_lock(&bpf_stats_enabled_mutex);
4249
4250         /* Set a very high limit to avoid overflow */
4251         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4252                 mutex_unlock(&bpf_stats_enabled_mutex);
4253                 return -EBUSY;
4254         }
4255
4256         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4257         if (fd >= 0)
4258                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4259
4260         mutex_unlock(&bpf_stats_enabled_mutex);
4261         return fd;
4262 }
4263
4264 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4265
4266 static int bpf_enable_stats(union bpf_attr *attr)
4267 {
4269         if (CHECK_ATTR(BPF_ENABLE_STATS))
4270                 return -EINVAL;
4271
4272         if (!capable(CAP_SYS_ADMIN))
4273                 return -EPERM;
4274
4275         switch (attr->enable_stats.type) {
4276         case BPF_STATS_RUN_TIME:
4277                 return bpf_enable_runtime_stats();
4278         default:
4279                 break;
4280         }
4281         return -EINVAL;
4282 }
4283
4284 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4285
4286 static int bpf_iter_create(union bpf_attr *attr)
4287 {
4288         struct bpf_link *link;
4289         int err;
4290
4291         if (CHECK_ATTR(BPF_ITER_CREATE))
4292                 return -EINVAL;
4293
4294         if (attr->iter_create.flags)
4295                 return -EINVAL;
4296
4297         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4298         if (IS_ERR(link))
4299                 return PTR_ERR(link);
4300
4301         err = bpf_iter_new_fd(link);
4302         bpf_link_put(link);
4303
4304         return err;
4305 }
4306
4307 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4308
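/*
 * BPF_PROG_BIND_MAP appends a map to a program's used_maps array so the
 * map lives at least as long as the program.  This is how user space
 * keeps e.g. metadata maps alive that the program text itself never
 * references.
 */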
4309 static int bpf_prog_bind_map(union bpf_attr *attr)
4310 {
4311         struct bpf_prog *prog;
4312         struct bpf_map *map;
4313         struct bpf_map **used_maps_old, **used_maps_new;
4314         int i, ret = 0;
4315
4316         if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4317                 return -EINVAL;
4318
4319         if (attr->prog_bind_map.flags)
4320                 return -EINVAL;
4321
4322         prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4323         if (IS_ERR(prog))
4324                 return PTR_ERR(prog);
4325
4326         map = bpf_map_get(attr->prog_bind_map.map_fd);
4327         if (IS_ERR(map)) {
4328                 ret = PTR_ERR(map);
4329                 goto out_prog_put;
4330         }
4331
4332         mutex_lock(&prog->aux->used_maps_mutex);
4333
4334         used_maps_old = prog->aux->used_maps;
4335
4336         for (i = 0; i < prog->aux->used_map_cnt; i++)
4337                 if (used_maps_old[i] == map) {
4338                         bpf_map_put(map);
4339                         goto out_unlock;
4340                 }
4341
4342         used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4343                                       sizeof(used_maps_new[0]),
4344                                       GFP_KERNEL);
4345         if (!used_maps_new) {
4346                 ret = -ENOMEM;
4347                 goto out_unlock;
4348         }
4349
4350         memcpy(used_maps_new, used_maps_old,
4351                sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4352         used_maps_new[prog->aux->used_map_cnt] = map;
4353
4354         prog->aux->used_map_cnt++;
4355         prog->aux->used_maps = used_maps_new;
4356
4357         kfree(used_maps_old);
4358
4359 out_unlock:
4360         mutex_unlock(&prog->aux->used_maps_mutex);
4361
4362         if (ret)
4363                 bpf_map_put(map);
4364 out_prog_put:
4365         bpf_prog_put(prog);
4366         return ret;
4367 }
4368
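/*
 * The bpf(2) entry point: a single syscall multiplexed on cmd.  The
 * attr union is copied in with a tail-zero check so that both older
 * and newer user space keep working against this kernel.  A minimal
 * invocation looks roughly like this from user space (the field values
 * are illustrative only):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 16,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */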
4369 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4370 {
4371         union bpf_attr attr;
4372         int err;
4373
4374         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4375                 return -EPERM;
4376
4377         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4378         if (err)
4379                 return err;
4380         size = min_t(u32, size, sizeof(attr));
4381
4382         /* copy attributes from user space, may be less than sizeof(bpf_attr) */
4383         memset(&attr, 0, sizeof(attr));
4384         if (copy_from_user(&attr, uattr, size) != 0)
4385                 return -EFAULT;
4386
4387         err = security_bpf(cmd, &attr, size);
4388         if (err < 0)
4389                 return err;
4390
4391         switch (cmd) {
4392         case BPF_MAP_CREATE:
4393                 err = map_create(&attr);
4394                 break;
4395         case BPF_MAP_LOOKUP_ELEM:
4396                 err = map_lookup_elem(&attr);
4397                 break;
4398         case BPF_MAP_UPDATE_ELEM:
4399                 err = map_update_elem(&attr);
4400                 break;
4401         case BPF_MAP_DELETE_ELEM:
4402                 err = map_delete_elem(&attr);
4403                 break;
4404         case BPF_MAP_GET_NEXT_KEY:
4405                 err = map_get_next_key(&attr);
4406                 break;
4407         case BPF_MAP_FREEZE:
4408                 err = map_freeze(&attr);
4409                 break;
4410         case BPF_PROG_LOAD:
4411                 err = bpf_prog_load(&attr, uattr);
4412                 break;
4413         case BPF_OBJ_PIN:
4414                 err = bpf_obj_pin(&attr);
4415                 break;
4416         case BPF_OBJ_GET:
4417                 err = bpf_obj_get(&attr);
4418                 break;
4419         case BPF_PROG_ATTACH:
4420                 err = bpf_prog_attach(&attr);
4421                 break;
4422         case BPF_PROG_DETACH:
4423                 err = bpf_prog_detach(&attr);
4424                 break;
4425         case BPF_PROG_QUERY:
4426                 err = bpf_prog_query(&attr, uattr);
4427                 break;
4428         case BPF_PROG_TEST_RUN:
4429                 err = bpf_prog_test_run(&attr, uattr);
4430                 break;
4431         case BPF_PROG_GET_NEXT_ID:
4432                 err = bpf_obj_get_next_id(&attr, uattr,
4433                                           &prog_idr, &prog_idr_lock);
4434                 break;
4435         case BPF_MAP_GET_NEXT_ID:
4436                 err = bpf_obj_get_next_id(&attr, uattr,
4437                                           &map_idr, &map_idr_lock);
4438                 break;
4439         case BPF_BTF_GET_NEXT_ID:
4440                 err = bpf_obj_get_next_id(&attr, uattr,
4441                                           &btf_idr, &btf_idr_lock);
4442                 break;
4443         case BPF_PROG_GET_FD_BY_ID:
4444                 err = bpf_prog_get_fd_by_id(&attr);
4445                 break;
4446         case BPF_MAP_GET_FD_BY_ID:
4447                 err = bpf_map_get_fd_by_id(&attr);
4448                 break;
4449         case BPF_OBJ_GET_INFO_BY_FD:
4450                 err = bpf_obj_get_info_by_fd(&attr, uattr);
4451                 break;
4452         case BPF_RAW_TRACEPOINT_OPEN:
4453                 err = bpf_raw_tracepoint_open(&attr);
4454                 break;
4455         case BPF_BTF_LOAD:
4456                 err = bpf_btf_load(&attr);
4457                 break;
4458         case BPF_BTF_GET_FD_BY_ID:
4459                 err = bpf_btf_get_fd_by_id(&attr);
4460                 break;
4461         case BPF_TASK_FD_QUERY:
4462                 err = bpf_task_fd_query(&attr, uattr);
4463                 break;
4464         case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4465                 err = map_lookup_and_delete_elem(&attr);
4466                 break;
4467         case BPF_MAP_LOOKUP_BATCH:
4468                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4469                 break;
4470         case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4471                 err = bpf_map_do_batch(&attr, uattr,
4472                                        BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4473                 break;
4474         case BPF_MAP_UPDATE_BATCH:
4475                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4476                 break;
4477         case BPF_MAP_DELETE_BATCH:
4478                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4479                 break;
4480         case BPF_LINK_CREATE:
4481                 err = link_create(&attr);
4482                 break;
4483         case BPF_LINK_UPDATE:
4484                 err = link_update(&attr);
4485                 break;
4486         case BPF_LINK_GET_FD_BY_ID:
4487                 err = bpf_link_get_fd_by_id(&attr);
4488                 break;
4489         case BPF_LINK_GET_NEXT_ID:
4490                 err = bpf_obj_get_next_id(&attr, uattr,
4491                                           &link_idr, &link_idr_lock);
4492                 break;
4493         case BPF_ENABLE_STATS:
4494                 err = bpf_enable_stats(&attr);
4495                 break;
4496         case BPF_ITER_CREATE:
4497                 err = bpf_iter_create(&attr);
4498                 break;
4499         case BPF_LINK_DETACH:
4500                 err = link_detach(&attr);
4501                 break;
4502         case BPF_PROG_BIND_MAP:
4503                 err = bpf_prog_bind_map(&attr);
4504                 break;
4505         default:
4506                 err = -EINVAL;
4507                 break;
4508         }
4509
4510         return err;
4511 }