// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};
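
/* With the X-macro definitions above, each BPF_MAP_TYPE() entry in
 * <linux/bpf_types.h> expands to one designated initializer, e.g.
 * (illustrative):
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *	=> [BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *
 * while the BPF_PROG_TYPE()/BPF_LINK_TYPE() entries expand to nothing here.
 */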

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU (time-of-check to time-of-use) race between this check
 * and the copy_from_user() call that follows it. This is not a concern,
 * however, since the check is only meant to future-proof against new bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
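
/* A typical caller pairs this check with copy_from_user() when accepting a
 * versioned uapi struct; sketch (illustrative, mirrors how __sys_bpf()
 * handles 'union bpf_attr'):
 *
 *	union bpf_attr attr = {};
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_user(&attr, uattr, size) != 0)
 *		return -EFAULT;
 */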

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
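	/* Clamp the user-controlled index under speculation so that the
	 * bounds check above can't be bypassed by a mispredicted branch
	 * to read past bpf_map_types (Spectre v1 hardening).
	 */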
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
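
/* Worked example (illustrative): for a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size = 12 on a machine with 4 possible CPUs, the syscall-visible
 * value is round_up(12, 8) * 4 = 64 bytes - one 8-byte-aligned slot per
 * possible CPU. fd-based maps instead expose a single 4-byte fd/id slot.
 */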

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we pass __GFP_NORETRY to
	 * kmalloc, which is used for lower-order allocation requests.
	 *
	 * It has been observed that higher-order allocation requests done
	 * by vmalloc with __GFP_NORETRY being set might fail due to not
	 * trying to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
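
/* Sizing note (illustrative): PAGE_ALLOC_COSTLY_ORDER is 3, so with 4 KiB
 * pages the kmalloc fast path above is only attempted for requests up to
 * PAGE_SIZE << 3 = 32 KiB; anything larger (or anything mmapable) goes
 * straight to vmalloc space.
 */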

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}
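
/* E.g. (illustrative): a map created with BPF_F_RDONLY | BPF_F_RDONLY_PROG
 * stores only BPF_F_RDONLY_PROG in map->map_flags - BPF_F_RDONLY/BPF_F_WRONLY
 * restrict one fd, while the *_PROG flags are a property of the map itself.
 */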

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our per-fd file permissions may have been overridden by the
	 * map's global permissions on the syscall side, e.g. after the
	 * map has been frozen.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and to display
 * reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}
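
/* Worked example (illustrative): key_size = 4 and value_size = 8 give
 * size = round_up(12, 8) = 16 bytes per entry; with max_entries = 1000 the
 * reported footprint is round_up(16000, PAGE_SIZE) = 16384 bytes on a
 * 4 KiB-page system. Per-entry overhead of the map implementation is
 * deliberately ignored.
 */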

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		type  = array->aux->type;
		jited = array->aux->jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}
/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt++;
		mutex_unlock(&map->freeze_mutex);
	}
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt--;
		mutex_unlock(&map->freeze_mutex);
	}
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference, allowing user-space to still modify it after
		 * freezing while the verifier assumes the contents do not
		 * change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		map->writecnt++;
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}
/* helper macro to check that unused fields in 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
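
/* Each command defines CMD_LAST_FIELD as the last attr field it consumes and
 * then scans everything past it for non-zero bytes; sketch of a caller
 * (illustrative, the pattern used throughout this file):
 *
 *	#define BPF_MAP_FREEZE_LAST_FIELD map_fd
 *
 *	if (CHECK_ATTR(BPF_MAP_FREEZE))
 *		return -EINVAL;
 */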

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
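
/* E.g. (illustrative): "my_map.v1" is accepted and its length returned;
 * "my map" fails with -EINVAL (space is neither alphanumeric, '_' nor '.');
 * a name that fills all "size" bytes without a terminating NUL also fails
 * with -EINVAL.
 */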

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with in order to figure out the
	     * corresponding kernel struct counterpart. Thus, attr->btf_fd
	     * has to be valid as well.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to userspace, and userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

/* if an error is returned, the fd is released.
 * On success, the caller should complete fd access with a matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}
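
/* Note (illustrative): maps with key_size == 0, e.g. BPF_MAP_TYPE_QUEUE and
 * BPF_MAP_TYPE_STACK, must pass a NULL key pointer from userspace; a non-NULL
 * key combined with key_size == 0 is rejected above with -EINVAL.
 */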

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		return -ENOMEM;
	}

	/* take the fd reference only for the loop below and pair it with
	 * fdput(); bpf_map_do_batch() guarantees ufd is valid
	 */
	f = fdget(ufd);
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	fdput(f);
	kfree(value);
	kfree(key);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

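/* A key may be deleted between map_get_next_key() and bpf_map_copy_value()
 * below, making the lookup fail with -ENOENT; in that case the walk retries
 * from the same prev_key up to MAP_LOOKUP_RETRIES times before giving up
 * with -EINTR.
 */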
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	     (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kfree(buf_prevkey);
	kfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);

	if (map->writecnt) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
1574
1575 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1576 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1577         [_id] = & _name ## _prog_ops,
1578 #define BPF_MAP_TYPE(_id, _ops)
1579 #define BPF_LINK_TYPE(_id, _name)
1580 #include <linux/bpf_types.h>
1581 #undef BPF_PROG_TYPE
1582 #undef BPF_MAP_TYPE
1583 #undef BPF_LINK_TYPE
1584 };
1585
1586 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1587 {
1588         const struct bpf_prog_ops *ops;
1589
1590         if (type >= ARRAY_SIZE(bpf_prog_types))
1591                 return -EINVAL;
1592         type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1593         ops = bpf_prog_types[type];
1594         if (!ops)
1595                 return -EINVAL;
1596
1597         if (!bpf_prog_is_dev_bound(prog->aux))
1598                 prog->aux->ops = ops;
1599         else
1600                 prog->aux->ops = &bpf_offload_prog_ops;
1601         prog->type = type;
1602         return 0;
1603 }
1604
1605 enum bpf_audit {
1606         BPF_AUDIT_LOAD,
1607         BPF_AUDIT_UNLOAD,
1608         BPF_AUDIT_MAX,
1609 };
1610
1611 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1612         [BPF_AUDIT_LOAD]   = "LOAD",
1613         [BPF_AUDIT_UNLOAD] = "UNLOAD",
1614 };
1615
1616 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1617 {
1618         struct audit_context *ctx = NULL;
1619         struct audit_buffer *ab;
1620
1621         if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1622                 return;
1623         if (audit_enabled == AUDIT_OFF)
1624                 return;
1625         if (op == BPF_AUDIT_LOAD)
1626                 ctx = audit_context();
1627         ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1628         if (unlikely(!ab))
1629                 return;
1630         audit_log_format(ab, "prog-id=%u op=%s",
1631                          prog->aux->id, bpf_audit_str[op]);
1632         audit_log_end(ab);
1633 }
1634
1635 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1636 {
1637         int id;
1638
1639         idr_preload(GFP_KERNEL);
1640         spin_lock_bh(&prog_idr_lock);
1641         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1642         if (id > 0)
1643                 prog->aux->id = id;
1644         spin_unlock_bh(&prog_idr_lock);
1645         idr_preload_end();
1646
1647         /* id is in [1, INT_MAX) */
1648         if (WARN_ON_ONCE(!id))
1649                 return -ENOSPC;
1650
1651         return id > 0 ? 0 : id;
1652 }
1653
1654 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1655 {
1656         /* cBPF to eBPF migrations are currently not in the idr store.
1657          * Offloaded programs are removed from the store when their device
1658          * disappears - even if someone grabs an fd to them, they are unusable,
1659          * simply waiting for the refcnt to drop so they can be freed.
1660          */
1661         if (!prog->aux->id)
1662                 return;
1663
1664         if (do_idr_lock)
1665                 spin_lock_bh(&prog_idr_lock);
1666         else
1667                 __acquire(&prog_idr_lock);
1668
1669         idr_remove(&prog_idr, prog->aux->id);
1670         prog->aux->id = 0;
1671
1672         if (do_idr_lock)
1673                 spin_unlock_bh(&prog_idr_lock);
1674         else
1675                 __release(&prog_idr_lock);
1676 }
1677
1678 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1679 {
1680         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1681
1682         kvfree(aux->func_info);
1683         kfree(aux->func_info_aux);
1684         free_uid(aux->user);
1685         security_bpf_prog_free(aux);
1686         bpf_prog_free(aux->prog);
1687 }
1688
1689 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1690 {
1691         bpf_prog_kallsyms_del_all(prog);
1692         btf_put(prog->aux->btf);
1693         bpf_prog_free_linfo(prog);
1694         if (prog->aux->attach_btf)
1695                 btf_put(prog->aux->attach_btf);
1696
1697         if (deferred) {
1698                 if (prog->aux->sleepable)
1699                         call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1700                 else
1701                         call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1702         } else {
1703                 __bpf_prog_put_rcu(&prog->aux->rcu);
1704         }
1705 }
1706
1707 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1708 {
1709         if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1710                 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1711                 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1712                 /* bpf_prog_free_id() must be called first */
1713                 bpf_prog_free_id(prog, do_idr_lock);
1714                 __bpf_prog_put_noref(prog, true);
1715         }
1716 }
1717
1718 void bpf_prog_put(struct bpf_prog *prog)
1719 {
1720         __bpf_prog_put(prog, true);
1721 }
1722 EXPORT_SYMBOL_GPL(bpf_prog_put);
1723
1724 static int bpf_prog_release(struct inode *inode, struct file *filp)
1725 {
1726         struct bpf_prog *prog = filp->private_data;
1727
1728         bpf_prog_put(prog);
1729         return 0;
1730 }
1731
1732 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1733                                struct bpf_prog_stats *stats)
1734 {
1735         u64 nsecs = 0, cnt = 0;
1736         int cpu;
1737
1738         for_each_possible_cpu(cpu) {
1739                 const struct bpf_prog_stats *st;
1740                 unsigned int start;
1741                 u64 tnsecs, tcnt;
1742
1743                 st = per_cpu_ptr(prog->aux->stats, cpu);
1744                 do {
1745                         start = u64_stats_fetch_begin_irq(&st->syncp);
1746                         tnsecs = st->nsecs;
1747                         tcnt = st->cnt;
1748                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1749                 nsecs += tnsecs;
1750                 cnt += tcnt;
1751         }
1752         stats->nsecs = nsecs;
1753         stats->cnt = cnt;
1754 }
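
/* Editor's note on the loop above: the u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() pair re-reads each CPU's counters until no
 * update raced with the read, so the summed 64-bit nsecs/cnt snapshot is
 * tear-free even on 32-bit hosts, without slowing the program's fast path.
 */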
1755
1756 #ifdef CONFIG_PROC_FS
1757 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1758 {
1759         const struct bpf_prog *prog = filp->private_data;
1760         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1761         struct bpf_prog_stats stats;
1762
1763         bpf_prog_get_stats(prog, &stats);
1764         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1765         seq_printf(m,
1766                    "prog_type:\t%u\n"
1767                    "prog_jited:\t%u\n"
1768                    "prog_tag:\t%s\n"
1769                    "memlock:\t%llu\n"
1770                    "prog_id:\t%u\n"
1771                    "run_time_ns:\t%llu\n"
1772                    "run_cnt:\t%llu\n",
1773                    prog->type,
1774                    prog->jited,
1775                    prog_tag,
1776                    prog->pages * 1ULL << PAGE_SHIFT,
1777                    prog->aux->id,
1778                    stats.nsecs,
1779                    stats.cnt);
1780 }
1781 #endif
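
/* Editor's note: with illustrative values, the seq_printf() above renders in
 * /proc/<pid>/fdinfo/<fd> as:
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	9a0e6b4f8a2d1c3e
 *	memlock:	4096
 *	prog_id:	17
 *	run_time_ns:	123456
 *	run_cnt:	42
 */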
1782
1783 const struct file_operations bpf_prog_fops = {
1784 #ifdef CONFIG_PROC_FS
1785         .show_fdinfo    = bpf_prog_show_fdinfo,
1786 #endif
1787         .release        = bpf_prog_release,
1788         .read           = bpf_dummy_read,
1789         .write          = bpf_dummy_write,
1790 };
1791
1792 int bpf_prog_new_fd(struct bpf_prog *prog)
1793 {
1794         int ret;
1795
1796         ret = security_bpf_prog(prog);
1797         if (ret < 0)
1798                 return ret;
1799
1800         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1801                                 O_RDWR | O_CLOEXEC);
1802 }
1803
1804 static struct bpf_prog *____bpf_prog_get(struct fd f)
1805 {
1806         if (!f.file)
1807                 return ERR_PTR(-EBADF);
1808         if (f.file->f_op != &bpf_prog_fops) {
1809                 fdput(f);
1810                 return ERR_PTR(-EINVAL);
1811         }
1812
1813         return f.file->private_data;
1814 }
1815
1816 void bpf_prog_add(struct bpf_prog *prog, int i)
1817 {
1818         atomic64_add(i, &prog->aux->refcnt);
1819 }
1820 EXPORT_SYMBOL_GPL(bpf_prog_add);
1821
1822 void bpf_prog_sub(struct bpf_prog *prog, int i)
1823 {
1824         /* Only to be used for undoing previous bpf_prog_add() in some
1825          * error path. We still know that another entity in our call
1826          * path holds a reference to the program, thus atomic_sub() can
1827          * be safely used in such cases!
1828          */
1829         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1830 }
1831 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1832
1833 void bpf_prog_inc(struct bpf_prog *prog)
1834 {
1835         atomic64_inc(&prog->aux->refcnt);
1836 }
1837 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1838
1839 /* prog_idr_lock should have been held */
1840 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1841 {
1842         int refold;
1843
1844         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1845
1846         if (!refold)
1847                 return ERR_PTR(-ENOENT);
1848
1849         return prog;
1850 }
1851 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1852
1853 bool bpf_prog_get_ok(struct bpf_prog *prog,
1854                             enum bpf_prog_type *attach_type, bool attach_drv)
1855 {
1856         /* not an attachment, just a refcount inc, always allow */
1857         if (!attach_type)
1858                 return true;
1859
1860         if (prog->type != *attach_type)
1861                 return false;
1862         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1863                 return false;
1864
1865         return true;
1866 }
1867
1868 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1869                                        bool attach_drv)
1870 {
1871         struct fd f = fdget(ufd);
1872         struct bpf_prog *prog;
1873
1874         prog = ____bpf_prog_get(f);
1875         if (IS_ERR(prog))
1876                 return prog;
1877         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1878                 prog = ERR_PTR(-EINVAL);
1879                 goto out;
1880         }
1881
1882         bpf_prog_inc(prog);
1883 out:
1884         fdput(f);
1885         return prog;
1886 }
1887
1888 struct bpf_prog *bpf_prog_get(u32 ufd)
1889 {
1890         return __bpf_prog_get(ufd, NULL, false);
1891 }
1892
1893 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1894                                        bool attach_drv)
1895 {
1896         return __bpf_prog_get(ufd, &type, attach_drv);
1897 }
1898 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1899
1900 /* Initially all BPF programs could be loaded w/o specifying
1901  * expected_attach_type. Later for some of them specifying expected_attach_type
1902  * at load time became required so that program could be validated properly.
1903  * Programs of types that are allowed to be loaded both w/ and w/o (for
1904  * at load time became required so that the program could be validated properly.
1905  * Programs of types that are allowed to be loaded both w/ and w/o (for
1906  * backward compatibility) expected_attach_type should have the default attach
1907  *
1908  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1909  * prog type requires it but has some attach types that have to be backward
1910  * compatible.
1911  */
1912 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1913 {
1914         switch (attr->prog_type) {
1915         case BPF_PROG_TYPE_CGROUP_SOCK:
1916                 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1917                  * exist, so checking for non-zero is the way to go here.
1918                  */
1919                 if (!attr->expected_attach_type)
1920                         attr->expected_attach_type =
1921                                 BPF_CGROUP_INET_SOCK_CREATE;
1922                 break;
1923         }
1924 }
1925
1926 static int
1927 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1928                            enum bpf_attach_type expected_attach_type,
1929                            struct btf *attach_btf, u32 btf_id,
1930                            struct bpf_prog *dst_prog)
1931 {
1932         if (btf_id) {
1933                 if (btf_id > BTF_MAX_TYPE)
1934                         return -EINVAL;
1935
1936                 if (!attach_btf && !dst_prog)
1937                         return -EINVAL;
1938
1939                 switch (prog_type) {
1940                 case BPF_PROG_TYPE_TRACING:
1941                 case BPF_PROG_TYPE_LSM:
1942                 case BPF_PROG_TYPE_STRUCT_OPS:
1943                 case BPF_PROG_TYPE_EXT:
1944                         break;
1945                 default:
1946                         return -EINVAL;
1947                 }
1948         }
1949
1950         if (attach_btf && (!btf_id || dst_prog))
1951                 return -EINVAL;
1952
1953         if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
1954             prog_type != BPF_PROG_TYPE_EXT)
1955                 return -EINVAL;
1956
1957         switch (prog_type) {
1958         case BPF_PROG_TYPE_CGROUP_SOCK:
1959                 switch (expected_attach_type) {
1960                 case BPF_CGROUP_INET_SOCK_CREATE:
1961                 case BPF_CGROUP_INET_SOCK_RELEASE:
1962                 case BPF_CGROUP_INET4_POST_BIND:
1963                 case BPF_CGROUP_INET6_POST_BIND:
1964                         return 0;
1965                 default:
1966                         return -EINVAL;
1967                 }
1968         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1969                 switch (expected_attach_type) {
1970                 case BPF_CGROUP_INET4_BIND:
1971                 case BPF_CGROUP_INET6_BIND:
1972                 case BPF_CGROUP_INET4_CONNECT:
1973                 case BPF_CGROUP_INET6_CONNECT:
1974                 case BPF_CGROUP_INET4_GETPEERNAME:
1975                 case BPF_CGROUP_INET6_GETPEERNAME:
1976                 case BPF_CGROUP_INET4_GETSOCKNAME:
1977                 case BPF_CGROUP_INET6_GETSOCKNAME:
1978                 case BPF_CGROUP_UDP4_SENDMSG:
1979                 case BPF_CGROUP_UDP6_SENDMSG:
1980                 case BPF_CGROUP_UDP4_RECVMSG:
1981                 case BPF_CGROUP_UDP6_RECVMSG:
1982                         return 0;
1983                 default:
1984                         return -EINVAL;
1985                 }
1986         case BPF_PROG_TYPE_CGROUP_SKB:
1987                 switch (expected_attach_type) {
1988                 case BPF_CGROUP_INET_INGRESS:
1989                 case BPF_CGROUP_INET_EGRESS:
1990                         return 0;
1991                 default:
1992                         return -EINVAL;
1993                 }
1994         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
1995                 switch (expected_attach_type) {
1996                 case BPF_CGROUP_SETSOCKOPT:
1997                 case BPF_CGROUP_GETSOCKOPT:
1998                         return 0;
1999                 default:
2000                         return -EINVAL;
2001                 }
2002         case BPF_PROG_TYPE_SK_LOOKUP:
2003                 if (expected_attach_type == BPF_SK_LOOKUP)
2004                         return 0;
2005                 return -EINVAL;
2006         case BPF_PROG_TYPE_EXT:
2007                 if (expected_attach_type)
2008                         return -EINVAL;
2009                 fallthrough;
2010         default:
2011                 return 0;
2012         }
2013 }
2014
2015 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2016 {
2017         switch (prog_type) {
2018         case BPF_PROG_TYPE_SCHED_CLS:
2019         case BPF_PROG_TYPE_SCHED_ACT:
2020         case BPF_PROG_TYPE_XDP:
2021         case BPF_PROG_TYPE_LWT_IN:
2022         case BPF_PROG_TYPE_LWT_OUT:
2023         case BPF_PROG_TYPE_LWT_XMIT:
2024         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2025         case BPF_PROG_TYPE_SK_SKB:
2026         case BPF_PROG_TYPE_SK_MSG:
2027         case BPF_PROG_TYPE_LIRC_MODE2:
2028         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2029         case BPF_PROG_TYPE_CGROUP_DEVICE:
2030         case BPF_PROG_TYPE_CGROUP_SOCK:
2031         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2032         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2033         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2034         case BPF_PROG_TYPE_SOCK_OPS:
2035         case BPF_PROG_TYPE_EXT: /* extends any prog */
2036                 return true;
2037         case BPF_PROG_TYPE_CGROUP_SKB:
2038                 /* always unpriv */
2039         case BPF_PROG_TYPE_SK_REUSEPORT:
2040                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2041         default:
2042                 return false;
2043         }
2044 }
2045
2046 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2047 {
2048         switch (prog_type) {
2049         case BPF_PROG_TYPE_KPROBE:
2050         case BPF_PROG_TYPE_TRACEPOINT:
2051         case BPF_PROG_TYPE_PERF_EVENT:
2052         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2053         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2054         case BPF_PROG_TYPE_TRACING:
2055         case BPF_PROG_TYPE_LSM:
2056         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2057         case BPF_PROG_TYPE_EXT: /* extends any prog */
2058                 return true;
2059         default:
2060                 return false;
2061         }
2062 }
2063
2064 /* last field in 'union bpf_attr' used by this command */
2065 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2066
2067 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2068 {
2069         enum bpf_prog_type type = attr->prog_type;
2070         struct bpf_prog *prog, *dst_prog = NULL;
2071         struct btf *attach_btf = NULL;
2072         int err;
2073         char license[128];
2074         bool is_gpl;
2075
2076         if (CHECK_ATTR(BPF_PROG_LOAD))
2077                 return -EINVAL;
2078
2079         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2080                                  BPF_F_ANY_ALIGNMENT |
2081                                  BPF_F_TEST_STATE_FREQ |
2082                                  BPF_F_SLEEPABLE |
2083                                  BPF_F_TEST_RND_HI32))
2084                 return -EINVAL;
2085
2086         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2087             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2088             !bpf_capable())
2089                 return -EPERM;
2090
2091         /* copy eBPF program license from user space */
2092         if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2093                               sizeof(license) - 1) < 0)
2094                 return -EFAULT;
2095         license[sizeof(license) - 1] = 0;
2096
2097         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2098         is_gpl = license_is_gpl_compatible(license);
2099
2100         if (attr->insn_cnt == 0 ||
2101             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2102                 return -E2BIG;
2103         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2104             type != BPF_PROG_TYPE_CGROUP_SKB &&
2105             !bpf_capable())
2106                 return -EPERM;
2107
2108         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2109                 return -EPERM;
2110         if (is_perfmon_prog_type(type) && !perfmon_capable())
2111                 return -EPERM;
2112
2113         /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2114          * or btf; we need to check which one it is
2115          */
2116         if (attr->attach_prog_fd) {
2117                 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2118                 if (IS_ERR(dst_prog)) {
2119                         dst_prog = NULL;
2120                         attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2121                         if (IS_ERR(attach_btf))
2122                                 return -EINVAL;
2123                         if (!btf_is_kernel(attach_btf)) {
2124                                 /* attaching through specifying bpf_prog's BTF
2125                                  * objects directly might be supported eventually
2126                                  */
2127                                 btf_put(attach_btf);
2128                                 return -ENOTSUPP;
2129                         }
2130                 }
2131         } else if (attr->attach_btf_id) {
2132                 /* fall back to vmlinux BTF, if BTF type ID is specified */
2133                 attach_btf = bpf_get_btf_vmlinux();
2134                 if (IS_ERR(attach_btf))
2135                         return PTR_ERR(attach_btf);
2136                 if (!attach_btf)
2137                         return -EINVAL;
2138                 btf_get(attach_btf);
2139         }
2140
2141         bpf_prog_load_fixup_attach_type(attr);
2142         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2143                                        attach_btf, attr->attach_btf_id,
2144                                        dst_prog)) {
2145                 if (dst_prog)
2146                         bpf_prog_put(dst_prog);
2147                 if (attach_btf)
2148                         btf_put(attach_btf);
2149                 return -EINVAL;
2150         }
2151
2152         /* plain bpf_prog allocation */
2153         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2154         if (!prog) {
2155                 if (dst_prog)
2156                         bpf_prog_put(dst_prog);
2157                 if (attach_btf)
2158                         btf_put(attach_btf);
2159                 return -ENOMEM;
2160         }
2161
2162         prog->expected_attach_type = attr->expected_attach_type;
2163         prog->aux->attach_btf = attach_btf;
2164         prog->aux->attach_btf_id = attr->attach_btf_id;
2165         prog->aux->dst_prog = dst_prog;
2166         prog->aux->offload_requested = !!attr->prog_ifindex;
2167         prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2168
2169         err = security_bpf_prog_alloc(prog->aux);
2170         if (err)
2171                 goto free_prog;
2172
2173         prog->aux->user = get_current_user();
2174         prog->len = attr->insn_cnt;
2175
2176         err = -EFAULT;
2177         if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2178                            bpf_prog_insn_size(prog)) != 0)
2179                 goto free_prog_sec;
2180
2181         prog->orig_prog = NULL;
2182         prog->jited = 0;
2183
2184         atomic64_set(&prog->aux->refcnt, 1);
2185         prog->gpl_compatible = is_gpl ? 1 : 0;
2186
2187         if (bpf_prog_is_dev_bound(prog->aux)) {
2188                 err = bpf_prog_offload_init(prog, attr);
2189                 if (err)
2190                         goto free_prog_sec;
2191         }
2192
2193         /* find program type: socket_filter vs tracing_filter */
2194         err = find_prog_type(type, prog);
2195         if (err < 0)
2196                 goto free_prog_sec;
2197
2198         prog->aux->load_time = ktime_get_boottime_ns();
2199         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2200                                sizeof(attr->prog_name));
2201         if (err < 0)
2202                 goto free_prog_sec;
2203
2204         /* run eBPF verifier */
2205         err = bpf_check(&prog, attr, uattr);
2206         if (err < 0)
2207                 goto free_used_maps;
2208
2209         prog = bpf_prog_select_runtime(prog, &err);
2210         if (err < 0)
2211                 goto free_used_maps;
2212
2213         err = bpf_prog_alloc_id(prog);
2214         if (err)
2215                 goto free_used_maps;
2216
2217         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2218          * effectively publicly exposed. However, retrieving via
2219          * bpf_prog_get_fd_by_id() will take another reference,
2220          * therefore it cannot be gone underneath us.
2221          *
2222          * Only for the time /after/ successful bpf_prog_new_fd()
2223          * and before returning to userspace, we might just hold
2224          * one reference and any parallel close on that fd could
2225          * rip everything out. Hence, below notifications must
2226          * happen before bpf_prog_new_fd().
2227          *
2228          * Also, any failure handling from this point onwards must
2229          * be using bpf_prog_put() given the program is exposed.
2230          */
2231         bpf_prog_kallsyms_add(prog);
2232         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2233         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2234
2235         err = bpf_prog_new_fd(prog);
2236         if (err < 0)
2237                 bpf_prog_put(prog);
2238         return err;
2239
2240 free_used_maps:
2241         /* In case we have subprogs, we need to wait for a grace
2242          * period before we can tear down JIT memory since symbols
2243          * are already exposed under kallsyms.
2244          */
2245         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2246         return err;
2247 free_prog_sec:
2248         free_uid(prog->aux->user);
2249         security_bpf_prog_free(prog->aux);
2250 free_prog:
2251         if (prog->aux->attach_btf)
2252                 btf_put(prog->aux->attach_btf);
2253         bpf_prog_free(prog);
2254         return err;
2255 }
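
/* Editor's note: a hedged user-space sketch (not part of this file) driving
 * the load path above with the smallest program the verifier accepts,
 * "r0 = 0; exit", loaded as a socket filter. load_trivial_prog() is an
 * illustrative name; the flags and capability checks above still apply.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int load_trivial_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0 },
 *			{ .code = BPF_JMP | BPF_EXIT },
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns = (__u64)(unsigned long)insns;
 *		attr.insn_cnt = 2;
 *		attr.license = (__u64)(unsigned long)"GPL";
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */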
2256
2257 #define BPF_OBJ_LAST_FIELD file_flags
2258
2259 static int bpf_obj_pin(const union bpf_attr *attr)
2260 {
2261         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2262                 return -EINVAL;
2263
2264         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2265 }
2266
2267 static int bpf_obj_get(const union bpf_attr *attr)
2268 {
2269         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2270             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2271                 return -EINVAL;
2272
2273         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2274                                 attr->file_flags);
2275 }
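
/* Editor's note: a brief user-space sketch of the two commands above,
 * assuming a bpffs mount at /sys/fs/bpf; the pin path is illustrative.
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd = prog_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.file_flags = BPF_F_RDONLY;	// or 0 for read-write
 *	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */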
2276
2277 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2278                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2279 {
2280         atomic64_set(&link->refcnt, 1);
2281         link->type = type;
2282         link->id = 0;
2283         link->ops = ops;
2284         link->prog = prog;
2285 }
2286
2287 static void bpf_link_free_id(int id)
2288 {
2289         if (!id)
2290                 return;
2291
2292         spin_lock_bh(&link_idr_lock);
2293         idr_remove(&link_idr, id);
2294         spin_unlock_bh(&link_idr_lock);
2295 }
2296
2297 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2298  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2299  * anon_inode's release() call. This helper marks bpf_link as
2300  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2301  * is not decremented; that is the responsibility of the calling code that failed
2302  * to complete bpf_link initialization.
2303  */
2304 void bpf_link_cleanup(struct bpf_link_primer *primer)
2305 {
2306         primer->link->prog = NULL;
2307         bpf_link_free_id(primer->id);
2308         fput(primer->file);
2309         put_unused_fd(primer->fd);
2310 }
2311
2312 void bpf_link_inc(struct bpf_link *link)
2313 {
2314         atomic64_inc(&link->refcnt);
2315 }
2316
2317 /* bpf_link_free is guaranteed to be called from process context */
2318 static void bpf_link_free(struct bpf_link *link)
2319 {
2320         bpf_link_free_id(link->id);
2321         if (link->prog) {
2322                 /* detach BPF program, clean up used resources */
2323                 link->ops->release(link);
2324                 bpf_prog_put(link->prog);
2325         }
2326         /* free bpf_link and its containing memory */
2327         link->ops->dealloc(link);
2328 }
2329
2330 static void bpf_link_put_deferred(struct work_struct *work)
2331 {
2332         struct bpf_link *link = container_of(work, struct bpf_link, work);
2333
2334         bpf_link_free(link);
2335 }
2336
2337 /* bpf_link_put can be called from atomic context, but ensures that resources
2338  * are freed from process context
2339  */
2340 void bpf_link_put(struct bpf_link *link)
2341 {
2342         if (!atomic64_dec_and_test(&link->refcnt))
2343                 return;
2344
2345         if (in_atomic()) {
2346                 INIT_WORK(&link->work, bpf_link_put_deferred);
2347                 schedule_work(&link->work);
2348         } else {
2349                 bpf_link_free(link);
2350         }
2351 }
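
/* Editor's note: the atomic-context deferral above exists because
 * bpf_link_free() ends up in ops->release(), which may sleep (detaching
 * typically takes mutexes), so it has to run from process context.
 */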
2352
2353 static int bpf_link_release(struct inode *inode, struct file *filp)
2354 {
2355         struct bpf_link *link = filp->private_data;
2356
2357         bpf_link_put(link);
2358         return 0;
2359 }
2360
2361 #ifdef CONFIG_PROC_FS
2362 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2363 #define BPF_MAP_TYPE(_id, _ops)
2364 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2365 static const char *bpf_link_type_strs[] = {
2366         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2367 #include <linux/bpf_types.h>
2368 };
2369 #undef BPF_PROG_TYPE
2370 #undef BPF_MAP_TYPE
2371 #undef BPF_LINK_TYPE
2372
2373 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2374 {
2375         const struct bpf_link *link = filp->private_data;
2376         const struct bpf_prog *prog = link->prog;
2377         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2378
2379         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2380         seq_printf(m,
2381                    "link_type:\t%s\n"
2382                    "link_id:\t%u\n"
2383                    "prog_tag:\t%s\n"
2384                    "prog_id:\t%u\n",
2385                    bpf_link_type_strs[link->type],
2386                    link->id,
2387                    prog_tag,
2388                    prog->aux->id);
2389         if (link->ops->show_fdinfo)
2390                 link->ops->show_fdinfo(link, m);
2391 }
2392 #endif
2393
2394 static const struct file_operations bpf_link_fops = {
2395 #ifdef CONFIG_PROC_FS
2396         .show_fdinfo    = bpf_link_show_fdinfo,
2397 #endif
2398         .release        = bpf_link_release,
2399         .read           = bpf_dummy_read,
2400         .write          = bpf_dummy_write,
2401 };
2402
2403 static int bpf_link_alloc_id(struct bpf_link *link)
2404 {
2405         int id;
2406
2407         idr_preload(GFP_KERNEL);
2408         spin_lock_bh(&link_idr_lock);
2409         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2410         spin_unlock_bh(&link_idr_lock);
2411         idr_preload_end();
2412
2413         return id;
2414 }
2415
2416 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2417  * reserving unused FD and allocating ID from link_idr. This is to be paired
2418  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2419  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2420  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
2421  * the transient state is passed around in struct bpf_link_primer.
2422  * This is the preferred way to create and initialize bpf_link, especially when
2423  * there are complicated and expensive operations in between creating bpf_link
2424  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2425  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2426  * expensive (and potentially failing) roll-back operations in the rare case
2427  * that the file, FD, or ID can't be allocated.
2428  */
2429 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2430 {
2431         struct file *file;
2432         int fd, id;
2433
2434         fd = get_unused_fd_flags(O_CLOEXEC);
2435         if (fd < 0)
2436                 return fd;
2437
2439         id = bpf_link_alloc_id(link);
2440         if (id < 0) {
2441                 put_unused_fd(fd);
2442                 return id;
2443         }
2444
2445         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2446         if (IS_ERR(file)) {
2447                 bpf_link_free_id(id);
2448                 put_unused_fd(fd);
2449                 return PTR_ERR(file);
2450         }
2451
2452         primer->link = link;
2453         primer->file = file;
2454         primer->fd = fd;
2455         primer->id = id;
2456         return 0;
2457 }
2458
2459 int bpf_link_settle(struct bpf_link_primer *primer)
2460 {
2461         /* make bpf_link fetchable by ID */
2462         spin_lock_bh(&link_idr_lock);
2463         primer->link->id = primer->id;
2464         spin_unlock_bh(&link_idr_lock);
2465         /* make bpf_link fetchable by FD */
2466         fd_install(primer->fd, primer->file);
2467         /* pass through installed FD */
2468         return primer->fd;
2469 }
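
/* Editor's note: the prime/settle discipline in condensed form, as used by
 * the attach paths later in this file (attach_to_hook() stands in for the
 * real, type-specific attachment step):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// link was never exposed
 *		return err;
 *	}
 *	err = attach_to_hook(link);		// the expensive, failure-prone part
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// releases file, FD and ID only
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// install FD, publish ID
 */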
2470
2471 int bpf_link_new_fd(struct bpf_link *link)
2472 {
2473         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2474 }
2475
2476 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2477 {
2478         struct fd f = fdget(ufd);
2479         struct bpf_link *link;
2480
2481         if (!f.file)
2482                 return ERR_PTR(-EBADF);
2483         if (f.file->f_op != &bpf_link_fops) {
2484                 fdput(f);
2485                 return ERR_PTR(-EINVAL);
2486         }
2487
2488         link = f.file->private_data;
2489         bpf_link_inc(link);
2490         fdput(f);
2491
2492         return link;
2493 }
2494
2495 struct bpf_tracing_link {
2496         struct bpf_link link;
2497         enum bpf_attach_type attach_type;
2498         struct bpf_trampoline *trampoline;
2499         struct bpf_prog *tgt_prog;
2500 };
2501
2502 static void bpf_tracing_link_release(struct bpf_link *link)
2503 {
2504         struct bpf_tracing_link *tr_link =
2505                 container_of(link, struct bpf_tracing_link, link);
2506
2507         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2508                                                 tr_link->trampoline));
2509
2510         bpf_trampoline_put(tr_link->trampoline);
2511
2512         /* tgt_prog is NULL if target is a kernel function */
2513         if (tr_link->tgt_prog)
2514                 bpf_prog_put(tr_link->tgt_prog);
2515 }
2516
2517 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2518 {
2519         struct bpf_tracing_link *tr_link =
2520                 container_of(link, struct bpf_tracing_link, link);
2521
2522         kfree(tr_link);
2523 }
2524
2525 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2526                                          struct seq_file *seq)
2527 {
2528         struct bpf_tracing_link *tr_link =
2529                 container_of(link, struct bpf_tracing_link, link);
2530
2531         seq_printf(seq,
2532                    "attach_type:\t%d\n",
2533                    tr_link->attach_type);
2534 }
2535
2536 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2537                                            struct bpf_link_info *info)
2538 {
2539         struct bpf_tracing_link *tr_link =
2540                 container_of(link, struct bpf_tracing_link, link);
2541
2542         info->tracing.attach_type = tr_link->attach_type;
2543
2544         return 0;
2545 }
2546
2547 static const struct bpf_link_ops bpf_tracing_link_lops = {
2548         .release = bpf_tracing_link_release,
2549         .dealloc = bpf_tracing_link_dealloc,
2550         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2551         .fill_link_info = bpf_tracing_link_fill_link_info,
2552 };
2553
2554 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2555                                    int tgt_prog_fd,
2556                                    u32 btf_id)
2557 {
2558         struct bpf_link_primer link_primer;
2559         struct bpf_prog *tgt_prog = NULL;
2560         struct bpf_trampoline *tr = NULL;
2561         struct bpf_tracing_link *link;
2562         u64 key = 0;
2563         int err;
2564
2565         switch (prog->type) {
2566         case BPF_PROG_TYPE_TRACING:
2567                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2568                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2569                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2570                         err = -EINVAL;
2571                         goto out_put_prog;
2572                 }
2573                 break;
2574         case BPF_PROG_TYPE_EXT:
2575                 if (prog->expected_attach_type != 0) {
2576                         err = -EINVAL;
2577                         goto out_put_prog;
2578                 }
2579                 break;
2580         case BPF_PROG_TYPE_LSM:
2581                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2582                         err = -EINVAL;
2583                         goto out_put_prog;
2584                 }
2585                 break;
2586         default:
2587                 err = -EINVAL;
2588                 goto out_put_prog;
2589         }
2590
2591         if (!!tgt_prog_fd != !!btf_id) {
2592                 err = -EINVAL;
2593                 goto out_put_prog;
2594         }
2595
2596         if (tgt_prog_fd) {
2597                 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2598                 if (prog->type != BPF_PROG_TYPE_EXT) {
2599                         err = -EINVAL;
2600                         goto out_put_prog;
2601                 }
2602
2603                 tgt_prog = bpf_prog_get(tgt_prog_fd);
2604                 if (IS_ERR(tgt_prog)) {
2605                         err = PTR_ERR(tgt_prog);
2606                         tgt_prog = NULL;
2607                         goto out_put_prog;
2608                 }
2609
2610                 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2611         }
2612
2613         link = kzalloc(sizeof(*link), GFP_USER);
2614         if (!link) {
2615                 err = -ENOMEM;
2616                 goto out_put_prog;
2617         }
2618         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2619                       &bpf_tracing_link_lops, prog);
2620         link->attach_type = prog->expected_attach_type;
2621
2622         mutex_lock(&prog->aux->dst_mutex);
2623
2624         /* There are a few possible cases here:
2625          *
2626          * - if prog->aux->dst_trampoline is set, the program was just loaded
2627          *   and not yet attached to anything, so we can use the values stored
2628          *   in prog->aux
2629          *
2630          * - if prog->aux->dst_trampoline is NULL, the program has already been
2631          *   attached to a target and its initial target was cleared (below)
2632          *
2633          * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2634          *   target_btf_id using the link_create API.
2635          *
2636          * - if tgt_prog == NULL, this function was called using the old
2637          *   raw_tracepoint_open API, and we need a target from prog->aux
2638          *
2639          * The combination of no saved target in prog->aux, and no target
2640          * specified on load is illegal, and we reject that here.
2641          */
2642         if (!prog->aux->dst_trampoline && !tgt_prog) {
2643                 err = -ENOENT;
2644                 goto out_unlock;
2645         }
2646
2647         if (!prog->aux->dst_trampoline ||
2648             (key && key != prog->aux->dst_trampoline->key)) {
2649                 /* If there is no saved target, or the specified target is
2650                  * different from the destination specified at load time, we
2651                  * need a new trampoline and a check for compatibility
2652                  */
2653                 struct bpf_attach_target_info tgt_info = {};
2654
2655                 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2656                                               &tgt_info);
2657                 if (err)
2658                         goto out_unlock;
2659
2660                 tr = bpf_trampoline_get(key, &tgt_info);
2661                 if (!tr) {
2662                         err = -ENOMEM;
2663                         goto out_unlock;
2664                 }
2665         } else {
2666                 /* The caller didn't specify a target, or the target was the
2667                  * same as the destination supplied during program load. This
2668                  * means we can reuse the trampoline and reference from program
2669                  * load time, and there is no need to allocate a new one. This
2670                  * can only happen once for any program, as the saved values in
2671                  * prog->aux are cleared below.
2672                  */
2673                 tr = prog->aux->dst_trampoline;
2674                 tgt_prog = prog->aux->dst_prog;
2675         }
2676
2677         err = bpf_link_prime(&link->link, &link_primer);
2678         if (err)
2679                 goto out_unlock;
2680
2681         err = bpf_trampoline_link_prog(prog, tr);
2682         if (err) {
2683                 bpf_link_cleanup(&link_primer);
2684                 link = NULL;
2685                 goto out_unlock;
2686         }
2687
2688         link->tgt_prog = tgt_prog;
2689         link->trampoline = tr;
2690
2691         /* Always clear the trampoline and target prog from prog->aux to make
2692          * sure the original attach destination is not kept alive after a
2693          * program is (re-)attached to another target.
2694          */
2695         if (prog->aux->dst_prog &&
2696             (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2697                 /* got extra prog ref from syscall, or attaching to different prog */
2698                 bpf_prog_put(prog->aux->dst_prog);
2699         if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2700                 /* we allocated a new trampoline, so free the old one */
2701                 bpf_trampoline_put(prog->aux->dst_trampoline);
2702
2703         prog->aux->dst_prog = NULL;
2704         prog->aux->dst_trampoline = NULL;
2705         mutex_unlock(&prog->aux->dst_mutex);
2706
2707         return bpf_link_settle(&link_primer);
2708 out_unlock:
2709         if (tr && tr != prog->aux->dst_trampoline)
2710                 bpf_trampoline_put(tr);
2711         mutex_unlock(&prog->aux->dst_mutex);
2712         kfree(link);
2713 out_put_prog:
2714         if (tgt_prog_fd && tgt_prog)
2715                 bpf_prog_put(tgt_prog);
2716         bpf_prog_put(prog);
2717         return err;
2718 }
2719
2720 struct bpf_raw_tp_link {
2721         struct bpf_link link;
2722         struct bpf_raw_event_map *btp;
2723 };
2724
2725 static void bpf_raw_tp_link_release(struct bpf_link *link)
2726 {
2727         struct bpf_raw_tp_link *raw_tp =
2728                 container_of(link, struct bpf_raw_tp_link, link);
2729
2730         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2731         bpf_put_raw_tracepoint(raw_tp->btp);
2732 }
2733
2734 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2735 {
2736         struct bpf_raw_tp_link *raw_tp =
2737                 container_of(link, struct bpf_raw_tp_link, link);
2738
2739         kfree(raw_tp);
2740 }
2741
2742 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2743                                         struct seq_file *seq)
2744 {
2745         struct bpf_raw_tp_link *raw_tp_link =
2746                 container_of(link, struct bpf_raw_tp_link, link);
2747
2748         seq_printf(seq,
2749                    "tp_name:\t%s\n",
2750                    raw_tp_link->btp->tp->name);
2751 }
2752
2753 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2754                                           struct bpf_link_info *info)
2755 {
2756         struct bpf_raw_tp_link *raw_tp_link =
2757                 container_of(link, struct bpf_raw_tp_link, link);
2758         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2759         const char *tp_name = raw_tp_link->btp->tp->name;
2760         u32 ulen = info->raw_tracepoint.tp_name_len;
2761         size_t tp_len = strlen(tp_name);
2762
2763         if (!ulen ^ !ubuf)
2764                 return -EINVAL;
2765
2766         info->raw_tracepoint.tp_name_len = tp_len + 1;
2767
2768         if (!ubuf)
2769                 return 0;
2770
2771         if (ulen >= tp_len + 1) {
2772                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2773                         return -EFAULT;
2774         } else {
2775                 char zero = '\0';
2776
2777                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2778                         return -EFAULT;
2779                 if (put_user(zero, ubuf + ulen - 1))
2780                         return -EFAULT;
2781                 return -ENOSPC;
2782         }
2783
2784         return 0;
2785 }
2786
2787 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2788         .release = bpf_raw_tp_link_release,
2789         .dealloc = bpf_raw_tp_link_dealloc,
2790         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2791         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2792 };
2793
2794 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2795
2796 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2797 {
2798         struct bpf_link_primer link_primer;
2799         struct bpf_raw_tp_link *link;
2800         struct bpf_raw_event_map *btp;
2801         struct bpf_prog *prog;
2802         const char *tp_name;
2803         char buf[128];
2804         int err;
2805
2806         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2807                 return -EINVAL;
2808
2809         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2810         if (IS_ERR(prog))
2811                 return PTR_ERR(prog);
2812
2813         switch (prog->type) {
2814         case BPF_PROG_TYPE_TRACING:
2815         case BPF_PROG_TYPE_EXT:
2816         case BPF_PROG_TYPE_LSM:
2817                 if (attr->raw_tracepoint.name) {
2818                         /* The attach point for this category of programs
2819                          * should be specified via btf_id during program load.
2820                          */
2821                         err = -EINVAL;
2822                         goto out_put_prog;
2823                 }
2824                 if (prog->type == BPF_PROG_TYPE_TRACING &&
2825                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2826                         tp_name = prog->aux->attach_func_name;
2827                         break;
2828                 }
2829                 return bpf_tracing_prog_attach(prog, 0, 0);
2830         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2831         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2832                 if (strncpy_from_user(buf,
2833                                       u64_to_user_ptr(attr->raw_tracepoint.name),
2834                                       sizeof(buf) - 1) < 0) {
2835                         err = -EFAULT;
2836                         goto out_put_prog;
2837                 }
2838                 buf[sizeof(buf) - 1] = 0;
2839                 tp_name = buf;
2840                 break;
2841         default:
2842                 err = -EINVAL;
2843                 goto out_put_prog;
2844         }
2845
2846         btp = bpf_get_raw_tracepoint(tp_name);
2847         if (!btp) {
2848                 err = -ENOENT;
2849                 goto out_put_prog;
2850         }
2851
2852         link = kzalloc(sizeof(*link), GFP_USER);
2853         if (!link) {
2854                 err = -ENOMEM;
2855                 goto out_put_btp;
2856         }
2857         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2858                       &bpf_raw_tp_link_lops, prog);
2859         link->btp = btp;
2860
2861         err = bpf_link_prime(&link->link, &link_primer);
2862         if (err) {
2863                 kfree(link);
2864                 goto out_put_btp;
2865         }
2866
2867         err = bpf_probe_register(link->btp, prog);
2868         if (err) {
2869                 bpf_link_cleanup(&link_primer);
2870                 goto out_put_btp;
2871         }
2872
2873         return bpf_link_settle(&link_primer);
2874
2875 out_put_btp:
2876         bpf_put_raw_tracepoint(btp);
2877 out_put_prog:
2878         bpf_prog_put(prog);
2879         return err;
2880 }
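
/* Editor's note: a minimal user-space sketch for the command above, assuming
 * prog_fd refers to an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT program;
 * the tracepoint name is an example.
 *
 *	union bpf_attr attr;
 *	int link_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 *
 * For BPF_PROG_TYPE_TRACING/LSM/EXT programs, name must be left zero and the
 * target comes from the btf_id supplied at load time, as handled above.
 */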
2881
2882 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2883                                              enum bpf_attach_type attach_type)
2884 {
2885         switch (prog->type) {
2886         case BPF_PROG_TYPE_CGROUP_SOCK:
2887         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2888         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2889         case BPF_PROG_TYPE_SK_LOOKUP:
2890                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2891         case BPF_PROG_TYPE_CGROUP_SKB:
2892                 if (!capable(CAP_NET_ADMIN))
2893                         /* cg-skb progs can be loaded by unpriv user.
2894                          * check permissions at attach time.
2895                          */
2896                         return -EPERM;
2897                 return prog->enforce_expected_attach_type &&
2898                         prog->expected_attach_type != attach_type ?
2899                         -EINVAL : 0;
2900         default:
2901                 return 0;
2902         }
2903 }
2904
2905 static enum bpf_prog_type
2906 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2907 {
2908         switch (attach_type) {
2909         case BPF_CGROUP_INET_INGRESS:
2910         case BPF_CGROUP_INET_EGRESS:
2911                 return BPF_PROG_TYPE_CGROUP_SKB;
2912         case BPF_CGROUP_INET_SOCK_CREATE:
2913         case BPF_CGROUP_INET_SOCK_RELEASE:
2914         case BPF_CGROUP_INET4_POST_BIND:
2915         case BPF_CGROUP_INET6_POST_BIND:
2916                 return BPF_PROG_TYPE_CGROUP_SOCK;
2917         case BPF_CGROUP_INET4_BIND:
2918         case BPF_CGROUP_INET6_BIND:
2919         case BPF_CGROUP_INET4_CONNECT:
2920         case BPF_CGROUP_INET6_CONNECT:
2921         case BPF_CGROUP_INET4_GETPEERNAME:
2922         case BPF_CGROUP_INET6_GETPEERNAME:
2923         case BPF_CGROUP_INET4_GETSOCKNAME:
2924         case BPF_CGROUP_INET6_GETSOCKNAME:
2925         case BPF_CGROUP_UDP4_SENDMSG:
2926         case BPF_CGROUP_UDP6_SENDMSG:
2927         case BPF_CGROUP_UDP4_RECVMSG:
2928         case BPF_CGROUP_UDP6_RECVMSG:
2929                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2930         case BPF_CGROUP_SOCK_OPS:
2931                 return BPF_PROG_TYPE_SOCK_OPS;
2932         case BPF_CGROUP_DEVICE:
2933                 return BPF_PROG_TYPE_CGROUP_DEVICE;
2934         case BPF_SK_MSG_VERDICT:
2935                 return BPF_PROG_TYPE_SK_MSG;
2936         case BPF_SK_SKB_STREAM_PARSER:
2937         case BPF_SK_SKB_STREAM_VERDICT:
2938                 return BPF_PROG_TYPE_SK_SKB;
2939         case BPF_LIRC_MODE2:
2940                 return BPF_PROG_TYPE_LIRC_MODE2;
2941         case BPF_FLOW_DISSECTOR:
2942                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
2943         case BPF_CGROUP_SYSCTL:
2944                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
2945         case BPF_CGROUP_GETSOCKOPT:
2946         case BPF_CGROUP_SETSOCKOPT:
2947                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2948         case BPF_TRACE_ITER:
2949                 return BPF_PROG_TYPE_TRACING;
2950         case BPF_SK_LOOKUP:
2951                 return BPF_PROG_TYPE_SK_LOOKUP;
2952         case BPF_XDP:
2953                 return BPF_PROG_TYPE_XDP;
2954         default:
2955                 return BPF_PROG_TYPE_UNSPEC;
2956         }
2957 }
2958
2959 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2960
2961 #define BPF_F_ATTACH_MASK \
2962         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2963
2964 static int bpf_prog_attach(const union bpf_attr *attr)
2965 {
2966         enum bpf_prog_type ptype;
2967         struct bpf_prog *prog;
2968         int ret;
2969
2970         if (CHECK_ATTR(BPF_PROG_ATTACH))
2971                 return -EINVAL;
2972
2973         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2974                 return -EINVAL;
2975
2976         ptype = attach_type_to_prog_type(attr->attach_type);
2977         if (ptype == BPF_PROG_TYPE_UNSPEC)
2978                 return -EINVAL;
2979
2980         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2981         if (IS_ERR(prog))
2982                 return PTR_ERR(prog);
2983
2984         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2985                 bpf_prog_put(prog);
2986                 return -EINVAL;
2987         }
2988
2989         switch (ptype) {
2990         case BPF_PROG_TYPE_SK_SKB:
2991         case BPF_PROG_TYPE_SK_MSG:
2992                 ret = sock_map_get_from_fd(attr, prog);
2993                 break;
2994         case BPF_PROG_TYPE_LIRC_MODE2:
2995                 ret = lirc_prog_attach(attr, prog);
2996                 break;
2997         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2998                 ret = netns_bpf_prog_attach(attr, prog);
2999                 break;
3000         case BPF_PROG_TYPE_CGROUP_DEVICE:
3001         case BPF_PROG_TYPE_CGROUP_SKB:
3002         case BPF_PROG_TYPE_CGROUP_SOCK:
3003         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3004         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3005         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3006         case BPF_PROG_TYPE_SOCK_OPS:
3007                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3008                 break;
3009         default:
3010                 ret = -EINVAL;
3011         }
3012
3013         if (ret)
3014                 bpf_prog_put(prog);
3015         return ret;
3016 }
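
/* Editor's note: a user-space sketch of a cgroup attach through the command
 * above; cgroup_fd and prog_fd are assumed to be valid descriptors.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.target_fd = cgroup_fd;	// open("/sys/fs/cgroup/...", O_RDONLY)
 *	attr.attach_bpf_fd = prog_fd;	// a BPF_PROG_TYPE_CGROUP_SKB program
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */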
3017
3018 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3019
3020 static int bpf_prog_detach(const union bpf_attr *attr)
3021 {
3022         enum bpf_prog_type ptype;
3023
3024         if (CHECK_ATTR(BPF_PROG_DETACH))
3025                 return -EINVAL;
3026
3027         ptype = attach_type_to_prog_type(attr->attach_type);
3028
3029         switch (ptype) {
3030         case BPF_PROG_TYPE_SK_MSG:
3031         case BPF_PROG_TYPE_SK_SKB:
3032                 return sock_map_prog_detach(attr, ptype);
3033         case BPF_PROG_TYPE_LIRC_MODE2:
3034                 return lirc_prog_detach(attr);
3035         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3036                 return netns_bpf_prog_detach(attr, ptype);
3037         case BPF_PROG_TYPE_CGROUP_DEVICE:
3038         case BPF_PROG_TYPE_CGROUP_SKB:
3039         case BPF_PROG_TYPE_CGROUP_SOCK:
3040         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3041         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3042         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3043         case BPF_PROG_TYPE_SOCK_OPS:
3044                 return cgroup_bpf_prog_detach(attr, ptype);
3045         default:
3046                 return -EINVAL;
3047         }
3048 }
3049
3050 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3051
3052 static int bpf_prog_query(const union bpf_attr *attr,
3053                           union bpf_attr __user *uattr)
3054 {
3055         if (!capable(CAP_NET_ADMIN))
3056                 return -EPERM;
3057         if (CHECK_ATTR(BPF_PROG_QUERY))
3058                 return -EINVAL;
3059         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3060                 return -EINVAL;
3061
3062         switch (attr->query.attach_type) {
3063         case BPF_CGROUP_INET_INGRESS:
3064         case BPF_CGROUP_INET_EGRESS:
3065         case BPF_CGROUP_INET_SOCK_CREATE:
3066         case BPF_CGROUP_INET_SOCK_RELEASE:
3067         case BPF_CGROUP_INET4_BIND:
3068         case BPF_CGROUP_INET6_BIND:
3069         case BPF_CGROUP_INET4_POST_BIND:
3070         case BPF_CGROUP_INET6_POST_BIND:
3071         case BPF_CGROUP_INET4_CONNECT:
3072         case BPF_CGROUP_INET6_CONNECT:
3073         case BPF_CGROUP_INET4_GETPEERNAME:
3074         case BPF_CGROUP_INET6_GETPEERNAME:
3075         case BPF_CGROUP_INET4_GETSOCKNAME:
3076         case BPF_CGROUP_INET6_GETSOCKNAME:
3077         case BPF_CGROUP_UDP4_SENDMSG:
3078         case BPF_CGROUP_UDP6_SENDMSG:
3079         case BPF_CGROUP_UDP4_RECVMSG:
3080         case BPF_CGROUP_UDP6_RECVMSG:
3081         case BPF_CGROUP_SOCK_OPS:
3082         case BPF_CGROUP_DEVICE:
3083         case BPF_CGROUP_SYSCTL:
3084         case BPF_CGROUP_GETSOCKOPT:
3085         case BPF_CGROUP_SETSOCKOPT:
3086                 return cgroup_bpf_prog_query(attr, uattr);
3087         case BPF_LIRC_MODE2:
3088                 return lirc_prog_query(attr, uattr);
3089         case BPF_FLOW_DISSECTOR:
3090         case BPF_SK_LOOKUP:
3091                 return netns_bpf_prog_query(attr, uattr);
3092         default:
3093                 return -EINVAL;
3094         }
3095 }
3096
3097 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3098
3099 static int bpf_prog_test_run(const union bpf_attr *attr,
3100                              union bpf_attr __user *uattr)
3101 {
3102         struct bpf_prog *prog;
3103         int ret = -ENOTSUPP;
3104
3105         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3106                 return -EINVAL;
3107
3108         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3109             (!attr->test.ctx_size_in && attr->test.ctx_in))
3110                 return -EINVAL;
3111
3112         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3113             (!attr->test.ctx_size_out && attr->test.ctx_out))
3114                 return -EINVAL;
3115
3116         prog = bpf_prog_get(attr->test.prog_fd);
3117         if (IS_ERR(prog))
3118                 return PTR_ERR(prog);
3119
3120         if (prog->aux->ops->test_run)
3121                 ret = prog->aux->ops->test_run(prog, attr, uattr);
3122
3123         bpf_prog_put(prog);
3124         return ret;
3125 }
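
/* Editor's note: a hedged user-space sketch of the test-run command above;
 * pkt is caller-provided input data, and retval/duration are filled in by
 * the program-type-specific test_run handler.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	if (!err)
 *		printf("retval=%u duration=%uns\n",
 *		       attr.test.retval, attr.test.duration);
 */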
3126
3127 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3128
3129 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3130                                union bpf_attr __user *uattr,
3131                                struct idr *idr,
3132                                spinlock_t *lock)
3133 {
3134         u32 next_id = attr->start_id;
3135         int err = 0;
3136
3137         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3138                 return -EINVAL;
3139
3140         if (!capable(CAP_SYS_ADMIN))
3141                 return -EPERM;
3142
3143         next_id++;
3144         spin_lock_bh(lock);
3145         if (!idr_get_next(idr, &next_id))
3146                 err = -ENOENT;
3147         spin_unlock_bh(lock);
3148
3149         if (!err)
3150                 err = put_user(next_id, &uattr->next_id);
3151
3152         return err;
3153 }
3154
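/* Return the map with ID *id, or the map with the next higher ID if
 * that one is gone, taking a reference on it.  A map may still sit in
 * the IDR for a short window after its refcount has already hit zero,
 * in which case __bpf_map_inc_not_zero() fails and the loop simply
 * advances to the next ID.
 */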
3155 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3156 {
3157         struct bpf_map *map;
3158
3159         spin_lock_bh(&map_idr_lock);
3160 again:
3161         map = idr_get_next(&map_idr, id);
3162         if (map) {
3163                 map = __bpf_map_inc_not_zero(map, false);
3164                 if (IS_ERR(map)) {
3165                         (*id)++;
3166                         goto again;
3167                 }
3168         }
3169         spin_unlock_bh(&map_idr_lock);
3170
3171         return map;
3172 }
3173
3174 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3175 {
3176         struct bpf_prog *prog;
3177
3178         spin_lock_bh(&prog_idr_lock);
3179 again:
3180         prog = idr_get_next(&prog_idr, id);
3181         if (prog) {
3182                 prog = bpf_prog_inc_not_zero(prog);
3183                 if (IS_ERR(prog)) {
3184                         (*id)++;
3185                         goto again;
3186                 }
3187         }
3188         spin_unlock_bh(&prog_idr_lock);
3189
3190         return prog;
3191 }
3192
3193 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3194
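/* Exact-match counterpart to the *_get_curr_or_next() helpers above:
 * look up a program by ID and take a reference on it, returning
 * -ENOENT for a zero or unknown ID instead of advancing to the next
 * live object.
 */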
3195 struct bpf_prog *bpf_prog_by_id(u32 id)
3196 {
3197         struct bpf_prog *prog;
3198
3199         if (!id)
3200                 return ERR_PTR(-ENOENT);
3201
3202         spin_lock_bh(&prog_idr_lock);
3203         prog = idr_find(&prog_idr, id);
3204         if (prog)
3205                 prog = bpf_prog_inc_not_zero(prog);
3206         else
3207                 prog = ERR_PTR(-ENOENT);
3208         spin_unlock_bh(&prog_idr_lock);
3209         return prog;
3210 }
3211
3212 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3213 {
3214         struct bpf_prog *prog;
3215         u32 id = attr->prog_id;
3216         int fd;
3217
3218         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3219                 return -EINVAL;
3220
3221         if (!capable(CAP_SYS_ADMIN))
3222                 return -EPERM;
3223
3224         prog = bpf_prog_by_id(id);
3225         if (IS_ERR(prog))
3226                 return PTR_ERR(prog);
3227
3228         fd = bpf_prog_new_fd(prog);
3229         if (fd < 0)
3230                 bpf_prog_put(prog);
3231
3232         return fd;
3233 }
3234
3235 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3236
3237 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3238 {
3239         struct bpf_map *map;
3240         u32 id = attr->map_id;
3241         int f_flags;
3242         int fd;
3243
3244         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3245             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3246                 return -EINVAL;
3247
3248         if (!capable(CAP_SYS_ADMIN))
3249                 return -EPERM;
3250
3251         f_flags = bpf_get_file_flag(attr->open_flags);
3252         if (f_flags < 0)
3253                 return f_flags;
3254
3255         spin_lock_bh(&map_idr_lock);
3256         map = idr_find(&map_idr, id);
3257         if (map)
3258                 map = __bpf_map_inc_not_zero(map, true);
3259         else
3260                 map = ERR_PTR(-ENOENT);
3261         spin_unlock_bh(&map_idr_lock);
3262
3263         if (IS_ERR(map))
3264                 return PTR_ERR(map);
3265
3266         fd = bpf_map_new_fd(map, f_flags);
3267         if (fd < 0)
3268                 bpf_map_put_with_uref(map);
3269
3270         return fd;
3271 }
3272
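/* Reverse-map an address embedded in a ld_imm64 instruction back to
 * the bpf_map it refers to: either the map pointer itself or, for
 * direct value access, an address inside the map's value area (in
 * which case *off receives the offset into the value).
 */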
3273 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3274                                               unsigned long addr, u32 *off,
3275                                               u32 *type)
3276 {
3277         const struct bpf_map *map;
3278         int i;
3279
3280         mutex_lock(&prog->aux->used_maps_mutex);
3281         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3282                 map = prog->aux->used_maps[i];
3283                 if (map == (void *)addr) {
3284                         *type = BPF_PSEUDO_MAP_FD;
3285                         goto out;
3286                 }
3287                 if (!map->ops->map_direct_value_meta)
3288                         continue;
3289                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3290                         *type = BPF_PSEUDO_MAP_VALUE;
3291                         goto out;
3292                 }
3293         }
3294         map = NULL;
3295
3296 out:
3297         mutex_unlock(&prog->aux->used_maps_mutex);
3298         return map;
3299 }
3300
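/* Prepare a copy of the program's xlated instructions that is safe to
 * dump to user space, i.e. one that leaks no kernel addresses:
 *  - verifier-rewritten tail calls are shown in their original
 *    bpf_tail_call() helper form,
 *  - helper call immediates are zeroed unless bpf_dump_raw_ok(),
 *  - BPF_PROBE_MEM loads are shown as ordinary BPF_MEM loads,
 *  - map pointers embedded in ld_imm64 are replaced by map IDs,
 *    plus an offset for direct value accesses.
 */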
3301 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3302                                               const struct cred *f_cred)
3303 {
3304         const struct bpf_map *map;
3305         struct bpf_insn *insns;
3306         u32 off, type;
3307         u64 imm;
3308         u8 code;
3309         int i;
3310
3311         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3312                         GFP_USER);
3313         if (!insns)
3314                 return insns;
3315
3316         for (i = 0; i < prog->len; i++) {
3317                 code = insns[i].code;
3318
3319                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3320                         insns[i].code = BPF_JMP | BPF_CALL;
3321                         insns[i].imm = BPF_FUNC_tail_call;
3322                         /* fall-through */
3323                 }
3324                 if (code == (BPF_JMP | BPF_CALL) ||
3325                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3326                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3327                                 insns[i].code = BPF_JMP | BPF_CALL;
3328                         if (!bpf_dump_raw_ok(f_cred))
3329                                 insns[i].imm = 0;
3330                         continue;
3331                 }
3332                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3333                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3334                         continue;
3335                 }
3336
3337                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3338                         continue;
3339
3340                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3341                 map = bpf_map_from_imm(prog, imm, &off, &type);
3342                 if (map) {
3343                         insns[i].src_reg = type;
3344                         insns[i].imm = map->id;
3345                         insns[i + 1].imm = off;
3346                         continue;
3347                 }
3348         }
3349
3350         return insns;
3351 }
3352
3353 static int set_info_rec_size(struct bpf_prog_info *info)
3354 {
3355         /*
3356          * Ensure each info.*_rec_size matches the record size the
3357          * kernel expects.
3358          *
3359          * As the only exception, a zero *_rec_size is allowed when
3360          * the matching _cnt is also zero.  In that case, the kernel
3361          * writes the expected _rec_size back into the info so user
3362          * space learns the record size it should have used.
3363          */
3364
3365         if ((info->nr_func_info || info->func_info_rec_size) &&
3366             info->func_info_rec_size != sizeof(struct bpf_func_info))
3367                 return -EINVAL;
3368
3369         if ((info->nr_line_info || info->line_info_rec_size) &&
3370             info->line_info_rec_size != sizeof(struct bpf_line_info))
3371                 return -EINVAL;
3372
3373         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3374             info->jited_line_info_rec_size != sizeof(__u64))
3375                 return -EINVAL;
3376
3377         info->func_info_rec_size = sizeof(struct bpf_func_info);
3378         info->line_info_rec_size = sizeof(struct bpf_line_info);
3379         info->jited_line_info_rec_size = sizeof(__u64);
3380
3381         return 0;
3382 }
3383
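/* Fill bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  Every variable-sized
 * member follows the same in/out protocol: user space supplies a
 * buffer pointer and its capacity in the matching nr_* or *_len
 * field, the kernel copies at most that much and writes the real
 * size back, so callers can probe with a zeroed struct first and
 * re-query with suitably sized buffers.  A minimal sketch, assuming
 * bpf_fd refers to a program:
 *
 *      struct bpf_prog_info info = {};
 *      union bpf_attr attr = {};
 *
 *      attr.info.bpf_fd   = bpf_fd;
 *      attr.info.info_len = sizeof(info);
 *      attr.info.info     = (__u64)(unsigned long)&info;
 *      syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */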
3384 static int bpf_prog_get_info_by_fd(struct file *file,
3385                                    struct bpf_prog *prog,
3386                                    const union bpf_attr *attr,
3387                                    union bpf_attr __user *uattr)
3388 {
3389         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3390         struct bpf_prog_info info;
3391         u32 info_len = attr->info.info_len;
3392         struct bpf_prog_stats stats;
3393         char __user *uinsns;
3394         u32 ulen;
3395         int err;
3396
3397         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3398         if (err)
3399                 return err;
3400         info_len = min_t(u32, sizeof(info), info_len);
3401
3402         memset(&info, 0, sizeof(info));
3403         if (copy_from_user(&info, uinfo, info_len))
3404                 return -EFAULT;
3405
3406         info.type = prog->type;
3407         info.id = prog->aux->id;
3408         info.load_time = prog->aux->load_time;
3409         info.created_by_uid = from_kuid_munged(current_user_ns(),
3410                                                prog->aux->user->uid);
3411         info.gpl_compatible = prog->gpl_compatible;
3412
3413         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3414         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3415
3416         mutex_lock(&prog->aux->used_maps_mutex);
3417         ulen = info.nr_map_ids;
3418         info.nr_map_ids = prog->aux->used_map_cnt;
3419         ulen = min_t(u32, info.nr_map_ids, ulen);
3420         if (ulen) {
3421                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3422                 u32 i;
3423
3424                 for (i = 0; i < ulen; i++)
3425                         if (put_user(prog->aux->used_maps[i]->id,
3426                                      &user_map_ids[i])) {
3427                                 mutex_unlock(&prog->aux->used_maps_mutex);
3428                                 return -EFAULT;
3429                         }
3430         }
3431         mutex_unlock(&prog->aux->used_maps_mutex);
3432
3433         err = set_info_rec_size(&info);
3434         if (err)
3435                 return err;
3436
3437         bpf_prog_get_stats(prog, &stats);
3438         info.run_time_ns = stats.nsecs;
3439         info.run_cnt = stats.cnt;
3440
3441         if (!bpf_capable()) {
3442                 info.jited_prog_len = 0;
3443                 info.xlated_prog_len = 0;
3444                 info.nr_jited_ksyms = 0;
3445                 info.nr_jited_func_lens = 0;
3446                 info.nr_func_info = 0;
3447                 info.nr_line_info = 0;
3448                 info.nr_jited_line_info = 0;
3449                 goto done;
3450         }
3451
3452         ulen = info.xlated_prog_len;
3453         info.xlated_prog_len = bpf_prog_insn_size(prog);
3454         if (info.xlated_prog_len && ulen) {
3455                 struct bpf_insn *insns_sanitized;
3456                 bool fault;
3457
3458                 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3459                         info.xlated_prog_insns = 0;
3460                         goto done;
3461                 }
3462                 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3463                 if (!insns_sanitized)
3464                         return -ENOMEM;
3465                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3466                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3467                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3468                 kfree(insns_sanitized);
3469                 if (fault)
3470                         return -EFAULT;
3471         }
3472
3473         if (bpf_prog_is_dev_bound(prog->aux)) {
3474                 err = bpf_prog_offload_info_fill(&info, prog);
3475                 if (err)
3476                         return err;
3477                 goto done;
3478         }
3479
3480         /* NOTE: the following code is never reached for offloaded
3481          * programs; bpf_prog_offload_info_fill() above is where the
3482          * corresponding fields are filled for them.
3483          */
3484         ulen = info.jited_prog_len;
3485         if (prog->aux->func_cnt) {
3486                 u32 i;
3487
3488                 info.jited_prog_len = 0;
3489                 for (i = 0; i < prog->aux->func_cnt; i++)
3490                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3491         } else {
3492                 info.jited_prog_len = prog->jited_len;
3493         }
3494
3495         if (info.jited_prog_len && ulen) {
3496                 if (bpf_dump_raw_ok(file->f_cred)) {
3497                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3498                         ulen = min_t(u32, info.jited_prog_len, ulen);
3499
3500                         /* for multi-function programs, copy the JITed
3501                          * instructions for all the functions
3502                          */
3503                         if (prog->aux->func_cnt) {
3504                                 u32 len, free, i;
3505                                 u8 *img;
3506
3507                                 free = ulen;
3508                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3509                                         len = prog->aux->func[i]->jited_len;
3510                                         len = min_t(u32, len, free);
3511                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3512                                         if (copy_to_user(uinsns, img, len))
3513                                                 return -EFAULT;
3514                                         uinsns += len;
3515                                         free -= len;
3516                                         if (!free)
3517                                                 break;
3518                                 }
3519                         } else {
3520                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3521                                         return -EFAULT;
3522                         }
3523                 } else {
3524                         info.jited_prog_insns = 0;
3525                 }
3526         }
3527
3528         ulen = info.nr_jited_ksyms;
3529         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3530         if (ulen) {
3531                 if (bpf_dump_raw_ok(file->f_cred)) {
3532                         unsigned long ksym_addr;
3533                         u64 __user *user_ksyms;
3534                         u32 i;
3535
3536                         /* copy the address of the kernel symbol
3537                          * corresponding to each function
3538                          */
3539                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3540                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3541                         if (prog->aux->func_cnt) {
3542                                 for (i = 0; i < ulen; i++) {
3543                                         ksym_addr = (unsigned long)
3544                                                 prog->aux->func[i]->bpf_func;
3545                                         if (put_user((u64) ksym_addr,
3546                                                      &user_ksyms[i]))
3547                                                 return -EFAULT;
3548                                 }
3549                         } else {
3550                                 ksym_addr = (unsigned long) prog->bpf_func;
3551                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3552                                         return -EFAULT;
3553                         }
3554                 } else {
3555                         info.jited_ksyms = 0;
3556                 }
3557         }
3558
3559         ulen = info.nr_jited_func_lens;
3560         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3561         if (ulen) {
3562                 if (bpf_dump_raw_ok(file->f_cred)) {
3563                         u32 __user *user_lens;
3564                         u32 func_len, i;
3565
3566                         /* copy the JITed image lengths for each function */
3567                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3568                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3569                         if (prog->aux->func_cnt) {
3570                                 for (i = 0; i < ulen; i++) {
3571                                         func_len =
3572                                                 prog->aux->func[i]->jited_len;
3573                                         if (put_user(func_len, &user_lens[i]))
3574                                                 return -EFAULT;
3575                                 }
3576                         } else {
3577                                 func_len = prog->jited_len;
3578                                 if (put_user(func_len, &user_lens[0]))
3579                                         return -EFAULT;
3580                         }
3581                 } else {
3582                         info.jited_func_lens = 0;
3583                 }
3584         }
3585
3586         if (prog->aux->btf)
3587                 info.btf_id = btf_obj_id(prog->aux->btf);
3588
3589         ulen = info.nr_func_info;
3590         info.nr_func_info = prog->aux->func_info_cnt;
3591         if (info.nr_func_info && ulen) {
3592                 char __user *user_finfo;
3593
3594                 user_finfo = u64_to_user_ptr(info.func_info);
3595                 ulen = min_t(u32, info.nr_func_info, ulen);
3596                 if (copy_to_user(user_finfo, prog->aux->func_info,
3597                                  info.func_info_rec_size * ulen))
3598                         return -EFAULT;
3599         }
3600
3601         ulen = info.nr_line_info;
3602         info.nr_line_info = prog->aux->nr_linfo;
3603         if (info.nr_line_info && ulen) {
3604                 __u8 __user *user_linfo;
3605
3606                 user_linfo = u64_to_user_ptr(info.line_info);
3607                 ulen = min_t(u32, info.nr_line_info, ulen);
3608                 if (copy_to_user(user_linfo, prog->aux->linfo,
3609                                  info.line_info_rec_size * ulen))
3610                         return -EFAULT;
3611         }
3612
3613         ulen = info.nr_jited_line_info;
3614         if (prog->aux->jited_linfo)
3615                 info.nr_jited_line_info = prog->aux->nr_linfo;
3616         else
3617                 info.nr_jited_line_info = 0;
3618         if (info.nr_jited_line_info && ulen) {
3619                 if (bpf_dump_raw_ok(file->f_cred)) {
3620                         __u64 __user *user_linfo;
3621                         u32 i;
3622
3623                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3624                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3625                         for (i = 0; i < ulen; i++) {
3626                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3627                                              &user_linfo[i]))
3628                                         return -EFAULT;
3629                         }
3630                 } else {
3631                         info.jited_line_info = 0;
3632                 }
3633         }
3634
3635         ulen = info.nr_prog_tags;
3636         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3637         if (ulen) {
3638                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3639                 u32 i;
3640
3641                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3642                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3643                 if (prog->aux->func_cnt) {
3644                         for (i = 0; i < ulen; i++) {
3645                                 if (copy_to_user(user_prog_tags[i],
3646                                                  prog->aux->func[i]->tag,
3647                                                  BPF_TAG_SIZE))
3648                                         return -EFAULT;
3649                         }
3650                 } else {
3651                         if (copy_to_user(user_prog_tags[0],
3652                                          prog->tag, BPF_TAG_SIZE))
3653                                 return -EFAULT;
3654                 }
3655         }
3656
3657 done:
3658         if (copy_to_user(uinfo, &info, info_len) ||
3659             put_user(info_len, &uattr->info.info_len))
3660                 return -EFAULT;
3661
3662         return 0;
3663 }
3664
3665 static int bpf_map_get_info_by_fd(struct file *file,
3666                                   struct bpf_map *map,
3667                                   const union bpf_attr *attr,
3668                                   union bpf_attr __user *uattr)
3669 {
3670         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3671         struct bpf_map_info info;
3672         u32 info_len = attr->info.info_len;
3673         int err;
3674
3675         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3676         if (err)
3677                 return err;
3678         info_len = min_t(u32, sizeof(info), info_len);
3679
3680         memset(&info, 0, sizeof(info));
3681         info.type = map->map_type;
3682         info.id = map->id;
3683         info.key_size = map->key_size;
3684         info.value_size = map->value_size;
3685         info.max_entries = map->max_entries;
3686         info.map_flags = map->map_flags;
3687         memcpy(info.name, map->name, sizeof(map->name));
3688
3689         if (map->btf) {
3690                 info.btf_id = btf_obj_id(map->btf);
3691                 info.btf_key_type_id = map->btf_key_type_id;
3692                 info.btf_value_type_id = map->btf_value_type_id;
3693         }
3694         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3695
3696         if (bpf_map_is_dev_bound(map)) {
3697                 err = bpf_map_offload_info_fill(&info, map);
3698                 if (err)
3699                         return err;
3700         }
3701
3702         if (copy_to_user(uinfo, &info, info_len) ||
3703             put_user(info_len, &uattr->info.info_len))
3704                 return -EFAULT;
3705
3706         return 0;
3707 }
3708
3709 static int bpf_btf_get_info_by_fd(struct file *file,
3710                                   struct btf *btf,
3711                                   const union bpf_attr *attr,
3712                                   union bpf_attr __user *uattr)
3713 {
3714         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3715         u32 info_len = attr->info.info_len;
3716         int err;
3717
3718         err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3719         if (err)
3720                 return err;
3721
3722         return btf_get_info_by_fd(btf, attr, uattr);
3723 }
3724
3725 static int bpf_link_get_info_by_fd(struct file *file,
3726                                   struct bpf_link *link,
3727                                   const union bpf_attr *attr,
3728                                   union bpf_attr __user *uattr)
3729 {
3730         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3731         struct bpf_link_info info;
3732         u32 info_len = attr->info.info_len;
3733         int err;
3734
3735         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3736         if (err)
3737                 return err;
3738         info_len = min_t(u32, sizeof(info), info_len);
3739
3740         memset(&info, 0, sizeof(info));
3741         if (copy_from_user(&info, uinfo, info_len))
3742                 return -EFAULT;
3743
3744         info.type = link->type;
3745         info.id = link->id;
3746         info.prog_id = link->prog->aux->id;
3747
3748         if (link->ops->fill_link_info) {
3749                 err = link->ops->fill_link_info(link, &info);
3750                 if (err)
3751                         return err;
3752         }
3753
3754         if (copy_to_user(uinfo, &info, info_len) ||
3755             put_user(info_len, &uattr->info.info_len))
3756                 return -EFAULT;
3757
3758         return 0;
3759 }
3760
3761
3762 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3763
3764 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3765                                   union bpf_attr __user *uattr)
3766 {
3767         int ufd = attr->info.bpf_fd;
3768         struct fd f;
3769         int err;
3770
3771         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3772                 return -EINVAL;
3773
3774         f = fdget(ufd);
3775         if (!f.file)
3776                 return -EBADFD;
3777
3778         if (f.file->f_op == &bpf_prog_fops)
3779                 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3780                                               uattr);
3781         else if (f.file->f_op == &bpf_map_fops)
3782                 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3783                                              uattr);
3784         else if (f.file->f_op == &btf_fops)
3785                 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3786         else if (f.file->f_op == &bpf_link_fops)
3787                 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3788                                               attr, uattr);
3789         else
3790                 err = -EINVAL;
3791
3792         fdput(f);
3793         return err;
3794 }
3795
3796 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3797
3798 static int bpf_btf_load(const union bpf_attr *attr)
3799 {
3800         if (CHECK_ATTR(BPF_BTF_LOAD))
3801                 return -EINVAL;
3802
3803         if (!bpf_capable())
3804                 return -EPERM;
3805
3806         return btf_new_fd(attr);
3807 }
3808
3809 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3810
3811 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3812 {
3813         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3814                 return -EINVAL;
3815
3816         if (!capable(CAP_SYS_ADMIN))
3817                 return -EPERM;
3818
3819         return btf_get_fd_by_id(attr->btf_id);
3820 }
3821
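/* Copy the BPF_TASK_FD_QUERY results back to user space.  The name in
 * buf (e.g. a tracepoint name) follows a truncation protocol: its full
 * length is always reported through buf_len, at most input_len bytes
 * are copied, and a truncated copy is still NUL-terminated but makes
 * the command fail with -ENOSPC.
 */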
3822 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3823                                     union bpf_attr __user *uattr,
3824                                     u32 prog_id, u32 fd_type,
3825                                     const char *buf, u64 probe_offset,
3826                                     u64 probe_addr)
3827 {
3828         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3829         u32 len = buf ? strlen(buf) : 0, input_len;
3830         int err = 0;
3831
3832         if (put_user(len, &uattr->task_fd_query.buf_len))
3833                 return -EFAULT;
3834         input_len = attr->task_fd_query.buf_len;
3835         if (input_len && ubuf) {
3836                 if (!len) {
3837                         /* nothing to copy, just NUL-terminate ubuf */
3838                         char zero = '\0';
3839
3840                         if (put_user(zero, ubuf))
3841                                 return -EFAULT;
3842                 } else if (input_len >= len + 1) {
3843                         /* ubuf can hold the string including its NUL terminator */
3844                         if (copy_to_user(ubuf, buf, len + 1))
3845                                 return -EFAULT;
3846                 } else {
3847                         /* ubuf cannot hold the string with NULL terminator,
3848                         /* ubuf cannot hold the full string; copy what fits,
3849                          * NUL-terminate it, and report -ENOSPC.
3850                         char zero = '\0';
3851
3852                         err = -ENOSPC;
3853                         if (copy_to_user(ubuf, buf, input_len - 1))
3854                                 return -EFAULT;
3855                         if (put_user(zero, ubuf + input_len - 1))
3856                                 return -EFAULT;
3857                 }
3858         }
3859
3860         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3861             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3862             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3863             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3864                 return -EFAULT;
3865
3866         return err;
3867 }
3868
3869 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3870
3871 static int bpf_task_fd_query(const union bpf_attr *attr,
3872                              union bpf_attr __user *uattr)
3873 {
3874         pid_t pid = attr->task_fd_query.pid;
3875         u32 fd = attr->task_fd_query.fd;
3876         const struct perf_event *event;
3877         struct task_struct *task;
3878         struct file *file;
3879         int err;
3880
3881         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3882                 return -EINVAL;
3883
3884         if (!capable(CAP_SYS_ADMIN))
3885                 return -EPERM;
3886
3887         if (attr->task_fd_query.flags != 0)
3888                 return -EINVAL;
3889
3890         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3891         if (!task)
3892                 return -ENOENT;
3893
3894         err = 0;
3895         file = fget_task(task, fd);
3896         put_task_struct(task);
3897         if (!file)
3898                 return -EBADF;
3899
3900         if (file->f_op == &bpf_link_fops) {
3901                 struct bpf_link *link = file->private_data;
3902
3903                 if (link->ops == &bpf_raw_tp_link_lops) {
3904                         struct bpf_raw_tp_link *raw_tp =
3905                                 container_of(link, struct bpf_raw_tp_link, link);
3906                         struct bpf_raw_event_map *btp = raw_tp->btp;
3907
3908                         err = bpf_task_fd_query_copy(attr, uattr,
3909                                                      raw_tp->link.prog->aux->id,
3910                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
3911                                                      btp->tp->name, 0, 0);
3912                         goto put_file;
3913                 }
3914                 goto out_not_supp;
3915         }
3916
3917         event = perf_get_event(file);
3918         if (!IS_ERR(event)) {
3919                 u64 probe_offset, probe_addr;
3920                 u32 prog_id, fd_type;
3921                 const char *buf;
3922
3923                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3924                                               &buf, &probe_offset,
3925                                               &probe_addr);
3926                 if (!err)
3927                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3928                                                      fd_type, buf,
3929                                                      probe_offset,
3930                                                      probe_addr);
3931                 goto put_file;
3932         }
3933
3934 out_not_supp:
3935         err = -ENOTSUPP;
3936 put_file:
3937         fput(file);
3938         return err;
3939 }
3940
3941 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3942
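/* Dispatch one of the four BPF_MAP_*_BATCH commands to the map's
 * batch callback, if implemented.  All four share the attr->batch
 * layout: keys/values point at arrays of count elements, and
 * in_batch/out_batch carry an opaque cursor that lets user space
 * resume the iteration on the next call.
 */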
3943 #define BPF_DO_BATCH(fn)                        \
3944         do {                                    \
3945                 if (!fn) {                      \
3946                         err = -ENOTSUPP;        \
3947                         goto err_put;           \
3948                 }                               \
3949                 err = fn(map, attr, uattr);     \
3950         } while (0)
3951
3952 static int bpf_map_do_batch(const union bpf_attr *attr,
3953                             union bpf_attr __user *uattr,
3954                             int cmd)
3955 {
3956         struct bpf_map *map;
3957         int err, ufd;
3958         struct fd f;
3959
3960         if (CHECK_ATTR(BPF_MAP_BATCH))
3961                 return -EINVAL;
3962
3963         ufd = attr->batch.map_fd;
3964         f = fdget(ufd);
3965         map = __bpf_map_get(f);
3966         if (IS_ERR(map))
3967                 return PTR_ERR(map);
3968
3969         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3970              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3971             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3972                 err = -EPERM;
3973                 goto err_put;
3974         }
3975
3976         if (cmd != BPF_MAP_LOOKUP_BATCH &&
3977             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3978                 err = -EPERM;
3979                 goto err_put;
3980         }
3981
3982         if (cmd == BPF_MAP_LOOKUP_BATCH)
3983                 BPF_DO_BATCH(map->ops->map_lookup_batch);
3984         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3985                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3986         else if (cmd == BPF_MAP_UPDATE_BATCH)
3987                 BPF_DO_BATCH(map->ops->map_update_batch);
3988         else
3989                 BPF_DO_BATCH(map->ops->map_delete_batch);
3990
3991 err_put:
3992         fdput(f);
3993         return err;
3994 }
3995
3996 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3997 {
3998         if (attr->link_create.attach_type != prog->expected_attach_type)
3999                 return -EINVAL;
4000
4001         if (prog->expected_attach_type == BPF_TRACE_ITER)
4002                 return bpf_iter_link_attach(attr, prog);
4003         else if (prog->type == BPF_PROG_TYPE_EXT)
4004                 return bpf_tracing_prog_attach(prog,
4005                                                attr->link_create.target_fd,
4006                                                attr->link_create.target_btf_id);
4007         return -EINVAL;
4008 }
4009
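/* BPF_LINK_CREATE: attach a program via a bpf_link, an fd-backed
 * kernel object representing the attachment itself.  Unlike legacy
 * BPF_PROG_ATTACH, the attachment is torn down automatically once the
 * last reference to the link is gone, so a crashed or exited loader
 * cannot leave a program silently attached.
 */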
4010 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
4011 static int link_create(union bpf_attr *attr)
4012 {
4013         enum bpf_prog_type ptype;
4014         struct bpf_prog *prog;
4015         int ret;
4016
4017         if (CHECK_ATTR(BPF_LINK_CREATE))
4018                 return -EINVAL;
4019
4020         prog = bpf_prog_get(attr->link_create.prog_fd);
4021         if (IS_ERR(prog))
4022                 return PTR_ERR(prog);
4023
4024         ret = bpf_prog_attach_check_attach_type(prog,
4025                                                 attr->link_create.attach_type);
4026         if (ret)
4027                 goto out;
4028
4029         if (prog->type == BPF_PROG_TYPE_EXT) {
4030                 ret = tracing_bpf_link_attach(attr, prog);
4031                 goto out;
4032         }
4033
4034         ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4035         if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4036                 ret = -EINVAL;
4037                 goto out;
4038         }
4039
4040         switch (ptype) {
4041         case BPF_PROG_TYPE_CGROUP_SKB:
4042         case BPF_PROG_TYPE_CGROUP_SOCK:
4043         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4044         case BPF_PROG_TYPE_SOCK_OPS:
4045         case BPF_PROG_TYPE_CGROUP_DEVICE:
4046         case BPF_PROG_TYPE_CGROUP_SYSCTL:
4047         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4048                 ret = cgroup_bpf_link_attach(attr, prog);
4049                 break;
4050         case BPF_PROG_TYPE_TRACING:
4051                 ret = tracing_bpf_link_attach(attr, prog);
4052                 break;
4053         case BPF_PROG_TYPE_FLOW_DISSECTOR:
4054         case BPF_PROG_TYPE_SK_LOOKUP:
4055                 ret = netns_bpf_link_create(attr, prog);
4056                 break;
4057 #ifdef CONFIG_NET
4058         case BPF_PROG_TYPE_XDP:
4059                 ret = bpf_xdp_link_attach(attr, prog);
4060                 break;
4061 #endif
4062         default:
4063                 ret = -EINVAL;
4064         }
4065
4066 out:
4067         if (ret < 0)
4068                 bpf_prog_put(prog);
4069         return ret;
4070 }
4071
4072 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4073
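/* BPF_LINK_UPDATE: atomically replace the program behind an existing
 * link.  With BPF_F_REPLACE the caller also passes old_prog_fd naming
 * the program it expects to be currently attached, turning the update
 * into a compare-and-swap that fails rather than racing with a
 * concurrent update.
 */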
4074 static int link_update(union bpf_attr *attr)
4075 {
4076         struct bpf_prog *old_prog = NULL, *new_prog;
4077         struct bpf_link *link;
4078         u32 flags;
4079         int ret;
4080
4081         if (CHECK_ATTR(BPF_LINK_UPDATE))
4082                 return -EINVAL;
4083
4084         flags = attr->link_update.flags;
4085         if (flags & ~BPF_F_REPLACE)
4086                 return -EINVAL;
4087
4088         link = bpf_link_get_from_fd(attr->link_update.link_fd);
4089         if (IS_ERR(link))
4090                 return PTR_ERR(link);
4091
4092         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4093         if (IS_ERR(new_prog)) {
4094                 ret = PTR_ERR(new_prog);
4095                 goto out_put_link;
4096         }
4097
4098         if (flags & BPF_F_REPLACE) {
4099                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4100                 if (IS_ERR(old_prog)) {
4101                         ret = PTR_ERR(old_prog);
4102                         old_prog = NULL;
4103                         goto out_put_progs;
4104                 }
4105         } else if (attr->link_update.old_prog_fd) {
4106                 ret = -EINVAL;
4107                 goto out_put_progs;
4108         }
4109
4110         if (link->ops->update_prog)
4111                 ret = link->ops->update_prog(link, new_prog, old_prog);
4112         else
4113                 ret = -EINVAL;
4114
4115 out_put_progs:
4116         if (old_prog)
4117                 bpf_prog_put(old_prog);
4118         if (ret)
4119                 bpf_prog_put(new_prog);
4120 out_put_link:
4121         bpf_link_put(link);
4122         return ret;
4123 }
4124
4125 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4126
4127 static int link_detach(union bpf_attr *attr)
4128 {
4129         struct bpf_link *link;
4130         int ret;
4131
4132         if (CHECK_ATTR(BPF_LINK_DETACH))
4133                 return -EINVAL;
4134
4135         link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4136         if (IS_ERR(link))
4137                 return PTR_ERR(link);
4138
4139         if (link->ops->detach)
4140                 ret = link->ops->detach(link);
4141         else
4142                 ret = -EOPNOTSUPP;
4143
4144         bpf_link_put(link);
4145         return ret;
4146 }
4147
4148 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4149 {
4150         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4151 }
4152
4153 struct bpf_link *bpf_link_by_id(u32 id)
4154 {
4155         struct bpf_link *link;
4156
4157         if (!id)
4158                 return ERR_PTR(-ENOENT);
4159
4160         spin_lock_bh(&link_idr_lock);
4161         /* before the link is "settled" its ID is 0, so pretend it doesn't exist yet */
4162         link = idr_find(&link_idr, id);
4163         if (link) {
4164                 if (link->id)
4165                         link = bpf_link_inc_not_zero(link);
4166                 else
4167                         link = ERR_PTR(-EAGAIN);
4168         } else {
4169                 link = ERR_PTR(-ENOENT);
4170         }
4171         spin_unlock_bh(&link_idr_lock);
4172         return link;
4173 }
4174
4175 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4176
4177 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4178 {
4179         struct bpf_link *link;
4180         u32 id = attr->link_id;
4181         int fd;
4182
4183         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4184                 return -EINVAL;
4185
4186         if (!capable(CAP_SYS_ADMIN))
4187                 return -EPERM;
4188
4189         link = bpf_link_by_id(id);
4190         if (IS_ERR(link))
4191                 return PTR_ERR(link);
4192
4193         fd = bpf_link_new_fd(link);
4194         if (fd < 0)
4195                 bpf_link_put(link);
4196
4197         return fd;
4198 }
4199
4200 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4201
4202 static int bpf_stats_release(struct inode *inode, struct file *file)
4203 {
4204         mutex_lock(&bpf_stats_enabled_mutex);
4205         static_key_slow_dec(&bpf_stats_enabled_key.key);
4206         mutex_unlock(&bpf_stats_enabled_mutex);
4207         return 0;
4208 }
4209
4210 static const struct file_operations bpf_stats_fops = {
4211         .release = bpf_stats_release,
4212 };
4213
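/* Runtime stats are refcounted through a static key: each successful
 * BPF_ENABLE_STATS call returns an anonymous fd whose release (see
 * bpf_stats_release() above) decrements the key again, so the stats
 * overhead is only paid while at least one such fd is open.  A
 * profiler can simply hold the fd for the duration of a sampling
 * session and close() it afterwards.
 */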
4214 static int bpf_enable_runtime_stats(void)
4215 {
4216         int fd;
4217
4218         mutex_lock(&bpf_stats_enabled_mutex);
4219
4220         /* Cap the count far below INT_MAX so the static key cannot overflow */
4221         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4222                 mutex_unlock(&bpf_stats_enabled_mutex);
4223                 return -EBUSY;
4224         }
4225
4226         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4227         if (fd >= 0)
4228                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4229
4230         mutex_unlock(&bpf_stats_enabled_mutex);
4231         return fd;
4232 }
4233
4234 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4235
4236 static int bpf_enable_stats(union bpf_attr *attr)
4237 {
4238
4239         if (CHECK_ATTR(BPF_ENABLE_STATS))
4240                 return -EINVAL;
4241
4242         if (!capable(CAP_SYS_ADMIN))
4243                 return -EPERM;
4244
4245         switch (attr->enable_stats.type) {
4246         case BPF_STATS_RUN_TIME:
4247                 return bpf_enable_runtime_stats();
4248         default:
4249                 break;
4250         }
4251         return -EINVAL;
4252 }
4253
4254 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4255
4256 static int bpf_iter_create(union bpf_attr *attr)
4257 {
4258         struct bpf_link *link;
4259         int err;
4260
4261         if (CHECK_ATTR(BPF_ITER_CREATE))
4262                 return -EINVAL;
4263
4264         if (attr->iter_create.flags)
4265                 return -EINVAL;
4266
4267         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4268         if (IS_ERR(link))
4269                 return PTR_ERR(link);
4270
4271         err = bpf_iter_new_fd(link);
4272         bpf_link_put(link);
4273
4274         return err;
4275 }
4276
4277 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4278
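/* BPF_PROG_BIND_MAP: record a map in the program's used_maps even
 * though no instruction references it, so the map lives at least as
 * long as the program.  Loaders use this to tie metadata maps (for
 * example ones describing the program) to the program itself.
 */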
4279 static int bpf_prog_bind_map(union bpf_attr *attr)
4280 {
4281         struct bpf_prog *prog;
4282         struct bpf_map *map;
4283         struct bpf_map **used_maps_old, **used_maps_new;
4284         int i, ret = 0;
4285
4286         if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4287                 return -EINVAL;
4288
4289         if (attr->prog_bind_map.flags)
4290                 return -EINVAL;
4291
4292         prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4293         if (IS_ERR(prog))
4294                 return PTR_ERR(prog);
4295
4296         map = bpf_map_get(attr->prog_bind_map.map_fd);
4297         if (IS_ERR(map)) {
4298                 ret = PTR_ERR(map);
4299                 goto out_prog_put;
4300         }
4301
4302         mutex_lock(&prog->aux->used_maps_mutex);
4303
4304         used_maps_old = prog->aux->used_maps;
4305
4306         for (i = 0; i < prog->aux->used_map_cnt; i++)
4307                 if (used_maps_old[i] == map) {
4308                         bpf_map_put(map);
4309                         goto out_unlock;
4310                 }
4311
4312         used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4313                                       sizeof(used_maps_new[0]),
4314                                       GFP_KERNEL);
4315         if (!used_maps_new) {
4316                 ret = -ENOMEM;
4317                 goto out_unlock;
4318         }
4319
4320         memcpy(used_maps_new, used_maps_old,
4321                sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4322         used_maps_new[prog->aux->used_map_cnt] = map;
4323
4324         prog->aux->used_map_cnt++;
4325         prog->aux->used_maps = used_maps_new;
4326
4327         kfree(used_maps_old);
4328
4329 out_unlock:
4330         mutex_unlock(&prog->aux->used_maps_mutex);
4331
4332         if (ret)
4333                 bpf_map_put(map);
4334 out_prog_put:
4335         bpf_prog_put(prog);
4336         return ret;
4337 }
4338
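/* The bpf(2) entry point: validate and copy in the user's bpf_attr,
 * then dispatch on cmd.  Passing a size different from the kernel's
 * sizeof(union bpf_attr) is fine: older binaries may pass less, newer
 * ones may pass more as long as every trailing byte the kernel does
 * not know about is zero (enforced by bpf_check_uarg_tail_zero()).
 */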
4339 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4340 {
4341         union bpf_attr attr;
4342         int err;
4343
4344         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4345                 return -EPERM;
4346
4347         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4348         if (err)
4349                 return err;
4350         size = min_t(u32, size, sizeof(attr));
4351
4352         /* copy attributes from user space, may be less than sizeof(bpf_attr) */
4353         memset(&attr, 0, sizeof(attr));
4354         if (copy_from_user(&attr, uattr, size) != 0)
4355                 return -EFAULT;
4356
4357         err = security_bpf(cmd, &attr, size);
4358         if (err < 0)
4359                 return err;
4360
4361         switch (cmd) {
4362         case BPF_MAP_CREATE:
4363                 err = map_create(&attr);
4364                 break;
4365         case BPF_MAP_LOOKUP_ELEM:
4366                 err = map_lookup_elem(&attr);
4367                 break;
4368         case BPF_MAP_UPDATE_ELEM:
4369                 err = map_update_elem(&attr);
4370                 break;
4371         case BPF_MAP_DELETE_ELEM:
4372                 err = map_delete_elem(&attr);
4373                 break;
4374         case BPF_MAP_GET_NEXT_KEY:
4375                 err = map_get_next_key(&attr);
4376                 break;
4377         case BPF_MAP_FREEZE:
4378                 err = map_freeze(&attr);
4379                 break;
4380         case BPF_PROG_LOAD:
4381                 err = bpf_prog_load(&attr, uattr);
4382                 break;
4383         case BPF_OBJ_PIN:
4384                 err = bpf_obj_pin(&attr);
4385                 break;
4386         case BPF_OBJ_GET:
4387                 err = bpf_obj_get(&attr);
4388                 break;
4389         case BPF_PROG_ATTACH:
4390                 err = bpf_prog_attach(&attr);
4391                 break;
4392         case BPF_PROG_DETACH:
4393                 err = bpf_prog_detach(&attr);
4394                 break;
4395         case BPF_PROG_QUERY:
4396                 err = bpf_prog_query(&attr, uattr);
4397                 break;
4398         case BPF_PROG_TEST_RUN:
4399                 err = bpf_prog_test_run(&attr, uattr);
4400                 break;
4401         case BPF_PROG_GET_NEXT_ID:
4402                 err = bpf_obj_get_next_id(&attr, uattr,
4403                                           &prog_idr, &prog_idr_lock);
4404                 break;
4405         case BPF_MAP_GET_NEXT_ID:
4406                 err = bpf_obj_get_next_id(&attr, uattr,
4407                                           &map_idr, &map_idr_lock);
4408                 break;
4409         case BPF_BTF_GET_NEXT_ID:
4410                 err = bpf_obj_get_next_id(&attr, uattr,
4411                                           &btf_idr, &btf_idr_lock);
4412                 break;
4413         case BPF_PROG_GET_FD_BY_ID:
4414                 err = bpf_prog_get_fd_by_id(&attr);
4415                 break;
4416         case BPF_MAP_GET_FD_BY_ID:
4417                 err = bpf_map_get_fd_by_id(&attr);
4418                 break;
4419         case BPF_OBJ_GET_INFO_BY_FD:
4420                 err = bpf_obj_get_info_by_fd(&attr, uattr);
4421                 break;
4422         case BPF_RAW_TRACEPOINT_OPEN:
4423                 err = bpf_raw_tracepoint_open(&attr);
4424                 break;
4425         case BPF_BTF_LOAD:
4426                 err = bpf_btf_load(&attr);
4427                 break;
4428         case BPF_BTF_GET_FD_BY_ID:
4429                 err = bpf_btf_get_fd_by_id(&attr);
4430                 break;
4431         case BPF_TASK_FD_QUERY:
4432                 err = bpf_task_fd_query(&attr, uattr);
4433                 break;
4434         case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4435                 err = map_lookup_and_delete_elem(&attr);
4436                 break;
4437         case BPF_MAP_LOOKUP_BATCH:
4438                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4439                 break;
4440         case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4441                 err = bpf_map_do_batch(&attr, uattr,
4442                                        BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4443                 break;
4444         case BPF_MAP_UPDATE_BATCH:
4445                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4446                 break;
4447         case BPF_MAP_DELETE_BATCH:
4448                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4449                 break;
4450         case BPF_LINK_CREATE:
4451                 err = link_create(&attr);
4452                 break;
4453         case BPF_LINK_UPDATE:
4454                 err = link_update(&attr);
4455                 break;
4456         case BPF_LINK_GET_FD_BY_ID:
4457                 err = bpf_link_get_fd_by_id(&attr);
4458                 break;
4459         case BPF_LINK_GET_NEXT_ID:
4460                 err = bpf_obj_get_next_id(&attr, uattr,
4461                                           &link_idr, &link_idr_lock);
4462                 break;
4463         case BPF_ENABLE_STATS:
4464                 err = bpf_enable_stats(&attr);
4465                 break;
4466         case BPF_ITER_CREATE:
4467                 err = bpf_iter_create(&attr);
4468                 break;
4469         case BPF_LINK_DETACH:
4470                 err = link_detach(&attr);
4471                 break;
4472         case BPF_PROG_BIND_MAP:
4473                 err = bpf_prog_bind_map(&attr);
4474                 break;
4475         default:
4476                 err = -EINVAL;
4477                 break;
4478         }
4479
4480         return err;
4481 }