// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};
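
/* Illustrative expansion of the table above: each BPF_MAP_TYPE(_id, _ops)
 * entry in <linux/bpf_types.h>, e.g.
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *
 * expands here to '[BPF_MAP_TYPE_ARRAY] = &array_map_ops,', while the
 * BPF_PROG_TYPE() and BPF_LINK_TYPE() entries expand to nothing.
 */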

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a time-of-check to time-of-use (ToCToU) race between this function
 * call and the following copy_from_user() call. However, this is not a
 * concern since this function is only meant to future-proof against unknown
 * bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
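
/* Typical caller pattern (sketch; 'known' and 'usize' are placeholders):
 * a handler that accepts a grown attribute struct first verifies the
 * unknown tail is zeroed, then copies only the part it understands:
 *
 *	err = bpf_check_uarg_tail_zero(uaddr, sizeof(known), usize);
 *	if (err)
 *		return err;
 *	if (copy_from_user(&known, uaddr, min_t(u32, usize, sizeof(known))))
 *		return -EFAULT;
 */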

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
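
/* Illustrative sizing (assuming a system with 4 possible CPUs): for a
 * BPF_MAP_TYPE_PERCPU_ARRAY with value_size = 12, syscall-side lookups
 * and updates transfer round_up(12, 8) * 4 = 64 bytes per element, one
 * 8-byte-aligned slot per possible CPU.
 */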

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but the lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask the lock, since the value wasn't zero-initialized */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering the OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}
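
/* Illustrative sizing (assuming 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER
 * == 3): a non-mmapable request of up to 32 KiB first tries
 * kmalloc_node() with __GFP_NORETRY; anything larger, any mmapable
 * request, or a failed kmalloc falls through to __vmalloc_node_range()
 * with __GFP_RETRY_MAYFAIL.
 */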

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (the underlying map implementation's ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and to display
 * reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}
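
/* Illustrative footprint (assuming 4 KiB pages): key_size = 4,
 * value_size = 8 and max_entries = 1000 give round_up(4 + 8, 8) = 16
 * bytes per entry, i.e. 16000 bytes total, reported as
 * round_up(16000, PAGE_SIZE) = 16384.
 */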

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		type  = array->aux->type;
		jited = array->aux->jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt++;
		mutex_unlock(&map->freeze_mutex);
	}
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt--;
		mutex_unlock(&map->freeze_mutex);
	}
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* The map is meant to be read-only, so do not allow mapping
		 * it as writable: that could leak a writable page reference
		 * and allow user-space to modify the contents after freezing,
		 * while the verifier assumes they do not change.
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		map->writecnt++;
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}
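
/* Illustrative userspace usage (sketch; 'map_fd' refers to an array map
 * created with BPF_F_MMAPABLE and 'len' to its page-aligned size):
 *
 *	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 map_fd, 0);
 *
 * Mapping with PROT_WRITE fails with EPERM once the map is frozen, and
 * with EACCES if the map was created with BPF_F_RDONLY_PROG.
 */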

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
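
/* Illustrative expansion: with BPF_MAP_FREEZE_LAST_FIELD defined as
 * map_fd, CHECK_ATTR(BPF_MAP_FREEZE) scans every byte of *attr that
 * lies after attr->map_fd and evaluates to true (i.e. "bad attr") if
 * any of them is non-zero.
 */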

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
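
/* For example, a map name such as "my_map.v2" is accepted, "my map"
 * (embedded space) fails with -EINVAL, and so does a name that fills
 * all "size" bytes without a terminating NUL.
 */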

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}
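
/* Illustrative value layout (sketch): with BTF describing a value type
 * such as
 *
 *	struct val {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *	};
 *
 * btf_find_spin_lock() sets map->spin_lock_off to offsetof(struct val,
 * lock), and BPF_F_LOCK lookups/updates then copy everything except the
 * lock word.
 */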

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with in order to figure out the
	     * corresponding kernel counterpart. Thus, attr->btf_fd has
	     * to be valid as well.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map to userspace,
		 * which may already have taken a reference to it through
		 * BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
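
/* Illustrative userspace usage (sketch):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the returned fd is O_CLOEXEC and read-write unless
 * BPF_F_RDONLY or BPF_F_WRONLY was set in attr.map_flags.
 */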

/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
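
/* Illustrative userspace usage (sketch; ptr_to_u64() being the usual
 * pointer-to-__u64 cast helper):
 *
 *	__u32 key = 7;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = ptr_to_u64(&key),
 *		.value  = ptr_to_u64(&value),
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */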

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
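
/* Illustrative userspace iteration (sketch): a NULL 'key' starts from
 * the first element, and -ENOENT marks the end of the map:
 *
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = 0,
 *		.next_key = ptr_to_u64(&next),
 *	};
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0) {
 *		... use 'next' ...
 *		attr.key = ptr_to_u64(&next);
 *	}
 */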

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		return -ENOMEM;
	}

	/* Hold the map file reference only across the update loop and drop
	 * it on exit; taking it before the early returns above would leak
	 * a reference per call.
	 */
	f = fdget(ufd);
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
		    map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	fdput(f);
	kfree(value);
	kfree(key);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	     (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kfree(buf_prevkey);
	kfree(buf);
	return err;
}
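
/* Illustrative failure mode covered by the retry loop above: if the
 * element returned by map_get_next_key() is deleted before
 * bpf_map_copy_value() runs, the lookup sees -ENOENT; the same position
 * is retried up to MAP_LOOKUP_RETRIES times before giving up with
 * -EINTR. Since batch.count and out_batch are still written in that
 * case, userspace may simply resume the batch from out_batch.
 */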

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);

	if (map->writecnt) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
1578
1579 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1580 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1581         [_id] = & _name ## _prog_ops,
1582 #define BPF_MAP_TYPE(_id, _ops)
1583 #define BPF_LINK_TYPE(_id, _name)
1584 #include <linux/bpf_types.h>
1585 #undef BPF_PROG_TYPE
1586 #undef BPF_MAP_TYPE
1587 #undef BPF_LINK_TYPE
1588 };
1589
1590 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1591 {
1592         const struct bpf_prog_ops *ops;
1593
1594         if (type >= ARRAY_SIZE(bpf_prog_types))
1595                 return -EINVAL;
1596         type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1597         ops = bpf_prog_types[type];
1598         if (!ops)
1599                 return -EINVAL;
1600
1601         if (!bpf_prog_is_dev_bound(prog->aux))
1602                 prog->aux->ops = ops;
1603         else
1604                 prog->aux->ops = &bpf_offload_prog_ops;
1605         prog->type = type;
1606         return 0;
1607 }
1608
1609 enum bpf_audit {
1610         BPF_AUDIT_LOAD,
1611         BPF_AUDIT_UNLOAD,
1612         BPF_AUDIT_MAX,
1613 };
1614
1615 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1616         [BPF_AUDIT_LOAD]   = "LOAD",
1617         [BPF_AUDIT_UNLOAD] = "UNLOAD",
1618 };
1619
1620 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1621 {
1622         struct audit_context *ctx = NULL;
1623         struct audit_buffer *ab;
1624
1625         if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1626                 return;
1627         if (audit_enabled == AUDIT_OFF)
1628                 return;
1629         if (op == BPF_AUDIT_LOAD)
1630                 ctx = audit_context();
1631         ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1632         if (unlikely(!ab))
1633                 return;
1634         audit_log_format(ab, "prog-id=%u op=%s",
1635                          prog->aux->id, bpf_audit_str[op]);
1636         audit_log_end(ab);
1637 }
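
/*
 * Illustrative audit record produced by the helper above (timestamp and
 * serial elided); the prog-id/op pair comes from the format string:
 *
 *   type=BPF msg=audit(...): prog-id=42 op=LOAD
 */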
1638
1639 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1640 {
1641         int id;
1642
1643         idr_preload(GFP_KERNEL);
1644         spin_lock_bh(&prog_idr_lock);
1645         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1646         if (id > 0)
1647                 prog->aux->id = id;
1648         spin_unlock_bh(&prog_idr_lock);
1649         idr_preload_end();
1650
1651         /* id is in [1, INT_MAX) */
1652         if (WARN_ON_ONCE(!id))
1653                 return -ENOSPC;
1654
1655         return id > 0 ? 0 : id;
1656 }
1657
1658 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1659 {
1660         /* cBPF to eBPF migrations are currently not in the idr store.
1661          * Offloaded programs are removed from the store when their device
1662          * disappears - even if someone grabs an fd to them they are unusable,
1663          * simply waiting for refcnt to drop to be freed.
1664          */
1665         if (!prog->aux->id)
1666                 return;
1667
1668         if (do_idr_lock)
1669                 spin_lock_bh(&prog_idr_lock);
1670         else
1671                 __acquire(&prog_idr_lock);
1672
1673         idr_remove(&prog_idr, prog->aux->id);
1674         prog->aux->id = 0;
1675
1676         if (do_idr_lock)
1677                 spin_unlock_bh(&prog_idr_lock);
1678         else
1679                 __release(&prog_idr_lock);
1680 }
1681
1682 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1683 {
1684         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1685
1686         kvfree(aux->func_info);
1687         kfree(aux->func_info_aux);
1688         free_uid(aux->user);
1689         security_bpf_prog_free(aux);
1690         bpf_prog_free(aux->prog);
1691 }
1692
1693 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1694 {
1695         bpf_prog_kallsyms_del_all(prog);
1696         btf_put(prog->aux->btf);
1697         bpf_prog_free_linfo(prog);
1698         if (prog->aux->attach_btf)
1699                 btf_put(prog->aux->attach_btf);
1700
1701         if (deferred) {
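		/* sleepable progs are protected by RCU Tasks Trace, so
		 * freeing them must also wait out that grace period
		 */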
1702                 if (prog->aux->sleepable)
1703                         call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1704                 else
1705                         call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1706         } else {
1707                 __bpf_prog_put_rcu(&prog->aux->rcu);
1708         }
1709 }
1710
1711 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1712 {
1713         if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1714                 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1715                 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1716                 /* bpf_prog_free_id() must be called first */
1717                 bpf_prog_free_id(prog, do_idr_lock);
1718                 __bpf_prog_put_noref(prog, true);
1719         }
1720 }
1721
1722 void bpf_prog_put(struct bpf_prog *prog)
1723 {
1724         __bpf_prog_put(prog, true);
1725 }
1726 EXPORT_SYMBOL_GPL(bpf_prog_put);
1727
1728 static int bpf_prog_release(struct inode *inode, struct file *filp)
1729 {
1730         struct bpf_prog *prog = filp->private_data;
1731
1732         bpf_prog_put(prog);
1733         return 0;
1734 }
1735
1736 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1737                                struct bpf_prog_stats *stats)
1738 {
1739         u64 nsecs = 0, cnt = 0, misses = 0;
1740         int cpu;
1741
1742         for_each_possible_cpu(cpu) {
1743                 const struct bpf_prog_stats *st;
1744                 unsigned int start;
1745                 u64 tnsecs, tcnt, tmisses;
1746
1747                 st = per_cpu_ptr(prog->stats, cpu);
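		/* u64_stats read section: retry if a concurrent writer
		 * updated the counters while we snapshotted them
		 */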
1748                 do {
1749                         start = u64_stats_fetch_begin_irq(&st->syncp);
1750                         tnsecs = st->nsecs;
1751                         tcnt = st->cnt;
1752                         tmisses = st->misses;
1753                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1754                 nsecs += tnsecs;
1755                 cnt += tcnt;
1756                 misses += tmisses;
1757         }
1758         stats->nsecs = nsecs;
1759         stats->cnt = cnt;
1760         stats->misses = misses;
1761 }
1762
1763 #ifdef CONFIG_PROC_FS
1764 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1765 {
1766         const struct bpf_prog *prog = filp->private_data;
1767         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1768         struct bpf_prog_stats stats;
1769
1770         bpf_prog_get_stats(prog, &stats);
1771         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1772         seq_printf(m,
1773                    "prog_type:\t%u\n"
1774                    "prog_jited:\t%u\n"
1775                    "prog_tag:\t%s\n"
1776                    "memlock:\t%llu\n"
1777                    "prog_id:\t%u\n"
1778                    "run_time_ns:\t%llu\n"
1779                    "run_cnt:\t%llu\n"
1780                    "recursion_misses:\t%llu\n",
1781                    prog->type,
1782                    prog->jited,
1783                    prog_tag,
1784                    prog->pages * 1ULL << PAGE_SHIFT,
1785                    prog->aux->id,
1786                    stats.nsecs,
1787                    stats.cnt,
1788                    stats.misses);
1789 }
1790 #endif
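
/*
 * Example of the resulting /proc/<pid>/fdinfo/<fd> entry (values are
 * illustrative):
 *
 *   prog_type:	1
 *   prog_jited:	1
 *   prog_tag:	a04f5eef06a7f555
 *   memlock:	4096
 *   prog_id:	42
 *   run_time_ns:	1337
 *   run_cnt:	7
 *   recursion_misses:	0
 */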
1791
1792 const struct file_operations bpf_prog_fops = {
1793 #ifdef CONFIG_PROC_FS
1794         .show_fdinfo    = bpf_prog_show_fdinfo,
1795 #endif
1796         .release        = bpf_prog_release,
1797         .read           = bpf_dummy_read,
1798         .write          = bpf_dummy_write,
1799 };
1800
1801 int bpf_prog_new_fd(struct bpf_prog *prog)
1802 {
1803         int ret;
1804
1805         ret = security_bpf_prog(prog);
1806         if (ret < 0)
1807                 return ret;
1808
1809         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1810                                 O_RDWR | O_CLOEXEC);
1811 }
1812
1813 static struct bpf_prog *____bpf_prog_get(struct fd f)
1814 {
1815         if (!f.file)
1816                 return ERR_PTR(-EBADF);
1817         if (f.file->f_op != &bpf_prog_fops) {
1818                 fdput(f);
1819                 return ERR_PTR(-EINVAL);
1820         }
1821
1822         return f.file->private_data;
1823 }
1824
1825 void bpf_prog_add(struct bpf_prog *prog, int i)
1826 {
1827         atomic64_add(i, &prog->aux->refcnt);
1828 }
1829 EXPORT_SYMBOL_GPL(bpf_prog_add);
1830
1831 void bpf_prog_sub(struct bpf_prog *prog, int i)
1832 {
1833         /* Only to be used for undoing previous bpf_prog_add() in some
1834          * error path. We still know that another entity in our call
1835          * path holds a reference to the program, thus atomic_sub() can
1836          * be safely used in such cases!
1837          */
1838         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1839 }
1840 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1841
1842 void bpf_prog_inc(struct bpf_prog *prog)
1843 {
1844         atomic64_inc(&prog->aux->refcnt);
1845 }
1846 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1847
1848 /* prog_idr_lock must be held by the caller */
1849 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1850 {
1851         int refold;
1852
1853         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1854
1855         if (!refold)
1856                 return ERR_PTR(-ENOENT);
1857
1858         return prog;
1859 }
1860 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1861
1862 bool bpf_prog_get_ok(struct bpf_prog *prog,
1863                             enum bpf_prog_type *attach_type, bool attach_drv)
1864 {
1865         /* not an attachment, just a refcount inc, always allow */
1866         if (!attach_type)
1867                 return true;
1868
1869         if (prog->type != *attach_type)
1870                 return false;
1871         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1872                 return false;
1873
1874         return true;
1875 }
1876
1877 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1878                                        bool attach_drv)
1879 {
1880         struct fd f = fdget(ufd);
1881         struct bpf_prog *prog;
1882
1883         prog = ____bpf_prog_get(f);
1884         if (IS_ERR(prog))
1885                 return prog;
1886         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1887                 prog = ERR_PTR(-EINVAL);
1888                 goto out;
1889         }
1890
1891         bpf_prog_inc(prog);
1892 out:
1893         fdput(f);
1894         return prog;
1895 }
1896
1897 struct bpf_prog *bpf_prog_get(u32 ufd)
1898 {
1899         return __bpf_prog_get(ufd, NULL, false);
1900 }
1901
1902 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1903                                        bool attach_drv)
1904 {
1905         return __bpf_prog_get(ufd, &type, attach_drv);
1906 }
1907 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1908
1909 /* Initially all BPF programs could be loaded w/o specifying
1910  * expected_attach_type. Later for some of them specifying expected_attach_type
1911  * at load time became required so that the program could be validated
1912  * properly. Program types that may be loaded both w/ and w/o (for
1913  * backward compatibility) expected_attach_type should have the default
1914  * attach type assigned to expected_attach_type for the latter case, so
1915  * that it can be validated later at attach time.
1916  *
1917  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1918  * prog type requires it but has some attach types that have to be backward
1919  * compatible.
1920  */
1921 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1922 {
1923         switch (attr->prog_type) {
1924         case BPF_PROG_TYPE_CGROUP_SOCK:
1925                 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1926                  * exist, so checking for non-zero is the way to go here.
1927                  */
1928                 if (!attr->expected_attach_type)
1929                         attr->expected_attach_type =
1930                                 BPF_CGROUP_INET_SOCK_CREATE;
1931                 break;
1932         }
1933 }
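
/*
 * Concrete effect of the fixup above (sketch): for
 * BPF_PROG_TYPE_CGROUP_SOCK these two BPF_PROG_LOAD invocations are
 * equivalent,
 *
 *   attr.expected_attach_type = 0;                             legacy loader
 *   attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;   explicit
 *
 * and whichever value ends up in expected_attach_type is enforced again
 * at attach time.
 */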
1934
1935 static int
1936 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1937                            enum bpf_attach_type expected_attach_type,
1938                            struct btf *attach_btf, u32 btf_id,
1939                            struct bpf_prog *dst_prog)
1940 {
1941         if (btf_id) {
1942                 if (btf_id > BTF_MAX_TYPE)
1943                         return -EINVAL;
1944
1945                 if (!attach_btf && !dst_prog)
1946                         return -EINVAL;
1947
1948                 switch (prog_type) {
1949                 case BPF_PROG_TYPE_TRACING:
1950                 case BPF_PROG_TYPE_LSM:
1951                 case BPF_PROG_TYPE_STRUCT_OPS:
1952                 case BPF_PROG_TYPE_EXT:
1953                         break;
1954                 default:
1955                         return -EINVAL;
1956                 }
1957         }
1958
1959         if (attach_btf && (!btf_id || dst_prog))
1960                 return -EINVAL;
1961
1962         if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
1963             prog_type != BPF_PROG_TYPE_EXT)
1964                 return -EINVAL;
1965
1966         switch (prog_type) {
1967         case BPF_PROG_TYPE_CGROUP_SOCK:
1968                 switch (expected_attach_type) {
1969                 case BPF_CGROUP_INET_SOCK_CREATE:
1970                 case BPF_CGROUP_INET_SOCK_RELEASE:
1971                 case BPF_CGROUP_INET4_POST_BIND:
1972                 case BPF_CGROUP_INET6_POST_BIND:
1973                         return 0;
1974                 default:
1975                         return -EINVAL;
1976                 }
1977         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1978                 switch (expected_attach_type) {
1979                 case BPF_CGROUP_INET4_BIND:
1980                 case BPF_CGROUP_INET6_BIND:
1981                 case BPF_CGROUP_INET4_CONNECT:
1982                 case BPF_CGROUP_INET6_CONNECT:
1983                 case BPF_CGROUP_INET4_GETPEERNAME:
1984                 case BPF_CGROUP_INET6_GETPEERNAME:
1985                 case BPF_CGROUP_INET4_GETSOCKNAME:
1986                 case BPF_CGROUP_INET6_GETSOCKNAME:
1987                 case BPF_CGROUP_UDP4_SENDMSG:
1988                 case BPF_CGROUP_UDP6_SENDMSG:
1989                 case BPF_CGROUP_UDP4_RECVMSG:
1990                 case BPF_CGROUP_UDP6_RECVMSG:
1991                         return 0;
1992                 default:
1993                         return -EINVAL;
1994                 }
1995         case BPF_PROG_TYPE_CGROUP_SKB:
1996                 switch (expected_attach_type) {
1997                 case BPF_CGROUP_INET_INGRESS:
1998                 case BPF_CGROUP_INET_EGRESS:
1999                         return 0;
2000                 default:
2001                         return -EINVAL;
2002                 }
2003         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2004                 switch (expected_attach_type) {
2005                 case BPF_CGROUP_SETSOCKOPT:
2006                 case BPF_CGROUP_GETSOCKOPT:
2007                         return 0;
2008                 default:
2009                         return -EINVAL;
2010                 }
2011         case BPF_PROG_TYPE_SK_LOOKUP:
2012                 if (expected_attach_type == BPF_SK_LOOKUP)
2013                         return 0;
2014                 return -EINVAL;
2015         case BPF_PROG_TYPE_EXT:
2016                 if (expected_attach_type)
2017                         return -EINVAL;
2018                 fallthrough;
2019         default:
2020                 return 0;
2021         }
2022 }
2023
2024 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2025 {
2026         switch (prog_type) {
2027         case BPF_PROG_TYPE_SCHED_CLS:
2028         case BPF_PROG_TYPE_SCHED_ACT:
2029         case BPF_PROG_TYPE_XDP:
2030         case BPF_PROG_TYPE_LWT_IN:
2031         case BPF_PROG_TYPE_LWT_OUT:
2032         case BPF_PROG_TYPE_LWT_XMIT:
2033         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2034         case BPF_PROG_TYPE_SK_SKB:
2035         case BPF_PROG_TYPE_SK_MSG:
2036         case BPF_PROG_TYPE_LIRC_MODE2:
2037         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2038         case BPF_PROG_TYPE_CGROUP_DEVICE:
2039         case BPF_PROG_TYPE_CGROUP_SOCK:
2040         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2041         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2042         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2043         case BPF_PROG_TYPE_SOCK_OPS:
2044         case BPF_PROG_TYPE_EXT: /* extends any prog */
2045                 return true;
2046         case BPF_PROG_TYPE_CGROUP_SKB:
2047                 /* always unpriv */
2048         case BPF_PROG_TYPE_SK_REUSEPORT:
2049                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2050         default:
2051                 return false;
2052         }
2053 }
2054
2055 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2056 {
2057         switch (prog_type) {
2058         case BPF_PROG_TYPE_KPROBE:
2059         case BPF_PROG_TYPE_TRACEPOINT:
2060         case BPF_PROG_TYPE_PERF_EVENT:
2061         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2062         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2063         case BPF_PROG_TYPE_TRACING:
2064         case BPF_PROG_TYPE_LSM:
2065         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2066         case BPF_PROG_TYPE_EXT: /* extends any prog */
2067                 return true;
2068         default:
2069                 return false;
2070         }
2071 }
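
/*
 * Together with the checks in bpf_prog_load() below, the two helpers
 * above implement the CAP_BPF capability split: bpf_capable() (CAP_BPF
 * or CAP_SYS_ADMIN) gates loading in general, the networking program
 * types additionally require CAP_NET_ADMIN, and the tracing ones
 * perfmon_capable() (CAP_PERFMON or CAP_SYS_ADMIN).
 */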
2072
2073 /* last field in 'union bpf_attr' used by this command */
2074 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2075
2076 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2077 {
2078         enum bpf_prog_type type = attr->prog_type;
2079         struct bpf_prog *prog, *dst_prog = NULL;
2080         struct btf *attach_btf = NULL;
2081         int err;
2082         char license[128];
2083         bool is_gpl;
2084
2085         if (CHECK_ATTR(BPF_PROG_LOAD))
2086                 return -EINVAL;
2087
2088         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2089                                  BPF_F_ANY_ALIGNMENT |
2090                                  BPF_F_TEST_STATE_FREQ |
2091                                  BPF_F_SLEEPABLE |
2092                                  BPF_F_TEST_RND_HI32))
2093                 return -EINVAL;
2094
2095         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2096             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2097             !bpf_capable())
2098                 return -EPERM;
2099
2100         /* copy eBPF program license from user space */
2101         if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2102                               sizeof(license) - 1) < 0)
2103                 return -EFAULT;
2104         license[sizeof(license) - 1] = 0;
2105
2106         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2107         is_gpl = license_is_gpl_compatible(license);
2108
2109         if (attr->insn_cnt == 0 ||
2110             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2111                 return -E2BIG;
2112         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2113             type != BPF_PROG_TYPE_CGROUP_SKB &&
2114             !bpf_capable())
2115                 return -EPERM;
2116
2117         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2118                 return -EPERM;
2119         if (is_perfmon_prog_type(type) && !perfmon_capable())
2120                 return -EPERM;
2121
2122         /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2123          * or btf; we need to check which one it is
2124          */
2125         if (attr->attach_prog_fd) {
2126                 dst_prog = bpf_prog_get(attr->attach_prog_fd);
2127                 if (IS_ERR(dst_prog)) {
2128                         dst_prog = NULL;
2129                         attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2130                         if (IS_ERR(attach_btf))
2131                                 return -EINVAL;
2132                         if (!btf_is_kernel(attach_btf)) {
2133                                 /* attaching through specifying bpf_prog's BTF
2134                                  * objects directly might be supported eventually
2135                                  */
2136                                 btf_put(attach_btf);
2137                                 return -ENOTSUPP;
2138                         }
2139                 }
2140         } else if (attr->attach_btf_id) {
2141                 /* fall back to vmlinux BTF, if BTF type ID is specified */
2142                 attach_btf = bpf_get_btf_vmlinux();
2143                 if (IS_ERR(attach_btf))
2144                         return PTR_ERR(attach_btf);
2145                 if (!attach_btf)
2146                         return -EINVAL;
2147                 btf_get(attach_btf);
2148         }
2149
2150         bpf_prog_load_fixup_attach_type(attr);
2151         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2152                                        attach_btf, attr->attach_btf_id,
2153                                        dst_prog)) {
2154                 if (dst_prog)
2155                         bpf_prog_put(dst_prog);
2156                 if (attach_btf)
2157                         btf_put(attach_btf);
2158                 return -EINVAL;
2159         }
2160
2161         /* plain bpf_prog allocation */
2162         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2163         if (!prog) {
2164                 if (dst_prog)
2165                         bpf_prog_put(dst_prog);
2166                 if (attach_btf)
2167                         btf_put(attach_btf);
2168                 return -ENOMEM;
2169         }
2170
2171         prog->expected_attach_type = attr->expected_attach_type;
2172         prog->aux->attach_btf = attach_btf;
2173         prog->aux->attach_btf_id = attr->attach_btf_id;
2174         prog->aux->dst_prog = dst_prog;
2175         prog->aux->offload_requested = !!attr->prog_ifindex;
2176         prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2177
2178         err = security_bpf_prog_alloc(prog->aux);
2179         if (err)
2180                 goto free_prog;
2181
2182         prog->aux->user = get_current_user();
2183         prog->len = attr->insn_cnt;
2184
2185         err = -EFAULT;
2186         if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2187                            bpf_prog_insn_size(prog)) != 0)
2188                 goto free_prog_sec;
2189
2190         prog->orig_prog = NULL;
2191         prog->jited = 0;
2192
2193         atomic64_set(&prog->aux->refcnt, 1);
2194         prog->gpl_compatible = is_gpl ? 1 : 0;
2195
2196         if (bpf_prog_is_dev_bound(prog->aux)) {
2197                 err = bpf_prog_offload_init(prog, attr);
2198                 if (err)
2199                         goto free_prog_sec;
2200         }
2201
2202         /* find program type: socket_filter vs tracing_filter */
2203         err = find_prog_type(type, prog);
2204         if (err < 0)
2205                 goto free_prog_sec;
2206
2207         prog->aux->load_time = ktime_get_boottime_ns();
2208         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2209                                sizeof(attr->prog_name));
2210         if (err < 0)
2211                 goto free_prog_sec;
2212
2213         /* run eBPF verifier */
2214         err = bpf_check(&prog, attr, uattr);
2215         if (err < 0)
2216                 goto free_used_maps;
2217
2218         prog = bpf_prog_select_runtime(prog, &err);
2219         if (err < 0)
2220                 goto free_used_maps;
2221
2222         err = bpf_prog_alloc_id(prog);
2223         if (err)
2224                 goto free_used_maps;
2225
2226         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2227          * effectively publicly exposed. However, retrieving via
2228          * bpf_prog_get_fd_by_id() will take another reference,
2229          * therefore it cannot be gone underneath us.
2230          *
2231          * Only for the time /after/ successful bpf_prog_new_fd()
2232          * and before returning to userspace, we might just hold
2233          * one reference and any parallel close on that fd could
2234          * rip everything out. Hence, below notifications must
2235          * happen before bpf_prog_new_fd().
2236          *
2237          * Also, any failure handling from this point onwards must
2238          * be using bpf_prog_put() given the program is exposed.
2239          */
2240         bpf_prog_kallsyms_add(prog);
2241         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2242         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2243
2244         err = bpf_prog_new_fd(prog);
2245         if (err < 0)
2246                 bpf_prog_put(prog);
2247         return err;
2248
2249 free_used_maps:
2250         /* In case we have subprogs, we need to wait for a grace
2251          * period before we can tear down JIT memory since symbols
2252          * are already exposed under kallsyms.
2253          */
2254         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2255         return err;
2256 free_prog_sec:
2257         free_uid(prog->aux->user);
2258         security_bpf_prog_free(prog->aux);
2259 free_prog:
2260         if (prog->aux->attach_btf)
2261                 btf_put(prog->aux->attach_btf);
2262         bpf_prog_free(prog);
2263         return err;
2264 }
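
/*
 * Minimal user-space counterpart to bpf_prog_load() (illustrative
 * sketch, not part of the kernel): loads the two-instruction program
 * "r0 = 0; exit" as a socket filter via bpf(2). Error handling is
 * elided; on success the returned fd holds a reference on the program.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int load_trivial_prog(void)
{
	struct bpf_insn insns[] = {
		/* r0 = 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
		/* exit */
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = 2;
	attr.license   = (__u64)(unsigned long)"GPL";

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}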
2265
2266 #define BPF_OBJ_LAST_FIELD file_flags
2267
2268 static int bpf_obj_pin(const union bpf_attr *attr)
2269 {
2270         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2271                 return -EINVAL;
2272
2273         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2274 }
2275
2276 static int bpf_obj_get(const union bpf_attr *attr)
2277 {
2278         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2279             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2280                 return -EINVAL;
2281
2282         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2283                                 attr->file_flags);
2284 }
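
/*
 * User-space sketch for the two commands above (illustrative): pin a
 * BPF object fd into bpffs and re-open it later. Assumes bpffs is
 * mounted at /sys/fs/bpf; error handling is elided.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int pin_obj(int bpf_fd, const char *path)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;
	attr.bpf_fd   = bpf_fd;

	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

int get_pinned_obj(const char *path)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = (__u64)(unsigned long)path;

	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}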
2285
2286 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2287                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2288 {
2289         atomic64_set(&link->refcnt, 1);
2290         link->type = type;
2291         link->id = 0;
2292         link->ops = ops;
2293         link->prog = prog;
2294 }
2295
2296 static void bpf_link_free_id(int id)
2297 {
2298         if (!id)
2299                 return;
2300
2301         spin_lock_bh(&link_idr_lock);
2302         idr_remove(&link_idr, id);
2303         spin_unlock_bh(&link_idr_lock);
2304 }
2305
2306 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2307  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2308  * anon_inode's release() call. This helper marks bpf_link as
2309  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2310  * is not decremented; it's the responsibility of the calling code that
2311  * failed to complete bpf_link initialization.
2312  */
2313 void bpf_link_cleanup(struct bpf_link_primer *primer)
2314 {
2315         primer->link->prog = NULL;
2316         bpf_link_free_id(primer->id);
2317         fput(primer->file);
2318         put_unused_fd(primer->fd);
2319 }
2320
2321 void bpf_link_inc(struct bpf_link *link)
2322 {
2323         atomic64_inc(&link->refcnt);
2324 }
2325
2326 /* bpf_link_free is guaranteed to be called from process context */
2327 static void bpf_link_free(struct bpf_link *link)
2328 {
2329         bpf_link_free_id(link->id);
2330         if (link->prog) {
2331                 /* detach BPF program, clean up used resources */
2332                 link->ops->release(link);
2333                 bpf_prog_put(link->prog);
2334         }
2335         /* free bpf_link and its containing memory */
2336         link->ops->dealloc(link);
2337 }
2338
2339 static void bpf_link_put_deferred(struct work_struct *work)
2340 {
2341         struct bpf_link *link = container_of(work, struct bpf_link, work);
2342
2343         bpf_link_free(link);
2344 }
2345
2346 /* bpf_link_put can be called from atomic context, but ensures that resources
2347  * are freed from process context
2348  */
2349 void bpf_link_put(struct bpf_link *link)
2350 {
2351         if (!atomic64_dec_and_test(&link->refcnt))
2352                 return;
2353
2354         if (in_atomic()) {
2355                 INIT_WORK(&link->work, bpf_link_put_deferred);
2356                 schedule_work(&link->work);
2357         } else {
2358                 bpf_link_free(link);
2359         }
2360 }
2361
2362 static int bpf_link_release(struct inode *inode, struct file *filp)
2363 {
2364         struct bpf_link *link = filp->private_data;
2365
2366         bpf_link_put(link);
2367         return 0;
2368 }
2369
2370 #ifdef CONFIG_PROC_FS
2371 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2372 #define BPF_MAP_TYPE(_id, _ops)
2373 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2374 static const char *bpf_link_type_strs[] = {
2375         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2376 #include <linux/bpf_types.h>
2377 };
2378 #undef BPF_PROG_TYPE
2379 #undef BPF_MAP_TYPE
2380 #undef BPF_LINK_TYPE
2381
2382 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2383 {
2384         const struct bpf_link *link = filp->private_data;
2385         const struct bpf_prog *prog = link->prog;
2386         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2387
2388         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2389         seq_printf(m,
2390                    "link_type:\t%s\n"
2391                    "link_id:\t%u\n"
2392                    "prog_tag:\t%s\n"
2393                    "prog_id:\t%u\n",
2394                    bpf_link_type_strs[link->type],
2395                    link->id,
2396                    prog_tag,
2397                    prog->aux->id);
2398         if (link->ops->show_fdinfo)
2399                 link->ops->show_fdinfo(link, m);
2400 }
2401 #endif
2402
2403 static const struct file_operations bpf_link_fops = {
2404 #ifdef CONFIG_PROC_FS
2405         .show_fdinfo    = bpf_link_show_fdinfo,
2406 #endif
2407         .release        = bpf_link_release,
2408         .read           = bpf_dummy_read,
2409         .write          = bpf_dummy_write,
2410 };
2411
2412 static int bpf_link_alloc_id(struct bpf_link *link)
2413 {
2414         int id;
2415
2416         idr_preload(GFP_KERNEL);
2417         spin_lock_bh(&link_idr_lock);
2418         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2419         spin_unlock_bh(&link_idr_lock);
2420         idr_preload_end();
2421
2422         return id;
2423 }
2424
2425 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2426  * reserving unused FD and allocating ID from link_idr. This is to be paired
2427  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2428  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2429  * pre-allocated resources are to be freed with bpf_cleanup() call. All the
2430  * pre-allocated resources are to be freed with a bpf_link_cleanup() call.
2431  * All the transient state is passed around in struct bpf_link_primer.
2432  * This is the preferred way to create and initialize bpf_link, especially
2433  * when there are complicated and expensive operations in between creating
2434  * bpf_link itself and attaching it to a BPF hook. By using bpf_link_prime()
2435  * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2436  * expensive (and potentially failing) rollback operations in the rare case
2437  * that the file, FD, or ID can't be allocated.
2438 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2439 {
2440         struct file *file;
2441         int fd, id;
2442
2443         fd = get_unused_fd_flags(O_CLOEXEC);
2444         if (fd < 0)
2445                 return fd;
2446
2448         id = bpf_link_alloc_id(link);
2449         if (id < 0) {
2450                 put_unused_fd(fd);
2451                 return id;
2452         }
2453
2454         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2455         if (IS_ERR(file)) {
2456                 bpf_link_free_id(id);
2457                 put_unused_fd(fd);
2458                 return PTR_ERR(file);
2459         }
2460
2461         primer->link = link;
2462         primer->file = file;
2463         primer->fd = fd;
2464         primer->id = id;
2465         return 0;
2466 }
2467
2468 int bpf_link_settle(struct bpf_link_primer *primer)
2469 {
2470         /* make bpf_link fetchable by ID */
2471         spin_lock_bh(&link_idr_lock);
2472         primer->link->id = primer->id;
2473         spin_unlock_bh(&link_idr_lock);
2474         /* make bpf_link fetchable by FD */
2475         fd_install(primer->fd, primer->file);
2476         /* pass through installed FD */
2477         return primer->fd;
2478 }
2479
2480 int bpf_link_new_fd(struct bpf_link *link)
2481 {
2482         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2483 }
2484
2485 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2486 {
2487         struct fd f = fdget(ufd);
2488         struct bpf_link *link;
2489
2490         if (!f.file)
2491                 return ERR_PTR(-EBADF);
2492         if (f.file->f_op != &bpf_link_fops) {
2493                 fdput(f);
2494                 return ERR_PTR(-EINVAL);
2495         }
2496
2497         link = f.file->private_data;
2498         bpf_link_inc(link);
2499         fdput(f);
2500
2501         return link;
2502 }
2503
2504 struct bpf_tracing_link {
2505         struct bpf_link link;
2506         enum bpf_attach_type attach_type;
2507         struct bpf_trampoline *trampoline;
2508         struct bpf_prog *tgt_prog;
2509 };
2510
2511 static void bpf_tracing_link_release(struct bpf_link *link)
2512 {
2513         struct bpf_tracing_link *tr_link =
2514                 container_of(link, struct bpf_tracing_link, link);
2515
2516         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2517                                                 tr_link->trampoline));
2518
2519         bpf_trampoline_put(tr_link->trampoline);
2520
2521         /* tgt_prog is NULL if target is a kernel function */
2522         if (tr_link->tgt_prog)
2523                 bpf_prog_put(tr_link->tgt_prog);
2524 }
2525
2526 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2527 {
2528         struct bpf_tracing_link *tr_link =
2529                 container_of(link, struct bpf_tracing_link, link);
2530
2531         kfree(tr_link);
2532 }
2533
2534 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2535                                          struct seq_file *seq)
2536 {
2537         struct bpf_tracing_link *tr_link =
2538                 container_of(link, struct bpf_tracing_link, link);
2539
2540         seq_printf(seq,
2541                    "attach_type:\t%d\n",
2542                    tr_link->attach_type);
2543 }
2544
2545 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2546                                            struct bpf_link_info *info)
2547 {
2548         struct bpf_tracing_link *tr_link =
2549                 container_of(link, struct bpf_tracing_link, link);
2550
2551         info->tracing.attach_type = tr_link->attach_type;
2552
2553         return 0;
2554 }
2555
2556 static const struct bpf_link_ops bpf_tracing_link_lops = {
2557         .release = bpf_tracing_link_release,
2558         .dealloc = bpf_tracing_link_dealloc,
2559         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2560         .fill_link_info = bpf_tracing_link_fill_link_info,
2561 };
2562
2563 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2564                                    int tgt_prog_fd,
2565                                    u32 btf_id)
2566 {
2567         struct bpf_link_primer link_primer;
2568         struct bpf_prog *tgt_prog = NULL;
2569         struct bpf_trampoline *tr = NULL;
2570         struct bpf_tracing_link *link;
2571         u64 key = 0;
2572         int err;
2573
2574         switch (prog->type) {
2575         case BPF_PROG_TYPE_TRACING:
2576                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2577                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2578                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2579                         err = -EINVAL;
2580                         goto out_put_prog;
2581                 }
2582                 break;
2583         case BPF_PROG_TYPE_EXT:
2584                 if (prog->expected_attach_type != 0) {
2585                         err = -EINVAL;
2586                         goto out_put_prog;
2587                 }
2588                 break;
2589         case BPF_PROG_TYPE_LSM:
2590                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2591                         err = -EINVAL;
2592                         goto out_put_prog;
2593                 }
2594                 break;
2595         default:
2596                 err = -EINVAL;
2597                 goto out_put_prog;
2598         }
2599
2600         if (!!tgt_prog_fd != !!btf_id) {
2601                 err = -EINVAL;
2602                 goto out_put_prog;
2603         }
2604
2605         if (tgt_prog_fd) {
2606                 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2607                 if (prog->type != BPF_PROG_TYPE_EXT) {
2608                         err = -EINVAL;
2609                         goto out_put_prog;
2610                 }
2611
2612                 tgt_prog = bpf_prog_get(tgt_prog_fd);
2613                 if (IS_ERR(tgt_prog)) {
2614                         err = PTR_ERR(tgt_prog);
2615                         tgt_prog = NULL;
2616                         goto out_put_prog;
2617                 }
2618
2619                 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
2620         }
2621
2622         link = kzalloc(sizeof(*link), GFP_USER);
2623         if (!link) {
2624                 err = -ENOMEM;
2625                 goto out_put_prog;
2626         }
2627         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2628                       &bpf_tracing_link_lops, prog);
2629         link->attach_type = prog->expected_attach_type;
2630
2631         mutex_lock(&prog->aux->dst_mutex);
2632
2633         /* There are a few possible cases here:
2634          *
2635          * - if prog->aux->dst_trampoline is set, the program was just loaded
2636          *   and not yet attached to anything, so we can use the values stored
2637          *   in prog->aux
2638          *
2639          * - if prog->aux->dst_trampoline is NULL, the program has already been
2640          *   attached to a target and its initial target was cleared (below)
2641          *
2642          * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2643          *   target_btf_id using the link_create API.
2644          *
2645          * - if tgt_prog == NULL, this function was called using the old
2646          *   raw_tracepoint_open API and we need a target from prog->aux
2647          *
2648          * The combination of no saved target in prog->aux, and no target
2649          * specified on load is illegal, and we reject that here.
2650          */
2651         if (!prog->aux->dst_trampoline && !tgt_prog) {
2652                 err = -ENOENT;
2653                 goto out_unlock;
2654         }
2655
2656         if (!prog->aux->dst_trampoline ||
2657             (key && key != prog->aux->dst_trampoline->key)) {
2658                 /* If there is no saved target, or the specified target is
2659                  * different from the destination specified at load time, we
2660                  * need a new trampoline and a check for compatibility
2661                  */
2662                 struct bpf_attach_target_info tgt_info = {};
2663
2664                 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2665                                               &tgt_info);
2666                 if (err)
2667                         goto out_unlock;
2668
2669                 tr = bpf_trampoline_get(key, &tgt_info);
2670                 if (!tr) {
2671                         err = -ENOMEM;
2672                         goto out_unlock;
2673                 }
2674         } else {
2675                 /* The caller didn't specify a target, or the target was the
2676                  * same as the destination supplied during program load. This
2677                  * means we can reuse the trampoline and reference from program
2678                  * load time, and there is no need to allocate a new one. This
2679                  * can only happen once for any program, as the saved values in
2680                  * prog->aux are cleared below.
2681                  */
2682                 tr = prog->aux->dst_trampoline;
2683                 tgt_prog = prog->aux->dst_prog;
2684         }
2685
2686         err = bpf_link_prime(&link->link, &link_primer);
2687         if (err)
2688                 goto out_unlock;
2689
2690         err = bpf_trampoline_link_prog(prog, tr);
2691         if (err) {
2692                 bpf_link_cleanup(&link_primer);
2693                 link = NULL;
2694                 goto out_unlock;
2695         }
2696
2697         link->tgt_prog = tgt_prog;
2698         link->trampoline = tr;
2699
2700         /* Always clear the trampoline and target prog from prog->aux to make
2701          * sure the original attach destination is not kept alive after a
2702          * program is (re-)attached to another target.
2703          */
2704         if (prog->aux->dst_prog &&
2705             (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2706                 /* got extra prog ref from syscall, or attaching to different prog */
2707                 bpf_prog_put(prog->aux->dst_prog);
2708         if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2709                 /* we allocated a new trampoline, so free the old one */
2710                 bpf_trampoline_put(prog->aux->dst_trampoline);
2711
2712         prog->aux->dst_prog = NULL;
2713         prog->aux->dst_trampoline = NULL;
2714         mutex_unlock(&prog->aux->dst_mutex);
2715
2716         return bpf_link_settle(&link_primer);
2717 out_unlock:
2718         if (tr && tr != prog->aux->dst_trampoline)
2719                 bpf_trampoline_put(tr);
2720         mutex_unlock(&prog->aux->dst_mutex);
2721         kfree(link);
2722 out_put_prog:
2723         if (tgt_prog_fd && tgt_prog)
2724                 bpf_prog_put(tgt_prog);
2725         return err;
2726 }
2727
2728 struct bpf_raw_tp_link {
2729         struct bpf_link link;
2730         struct bpf_raw_event_map *btp;
2731 };
2732
2733 static void bpf_raw_tp_link_release(struct bpf_link *link)
2734 {
2735         struct bpf_raw_tp_link *raw_tp =
2736                 container_of(link, struct bpf_raw_tp_link, link);
2737
2738         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2739         bpf_put_raw_tracepoint(raw_tp->btp);
2740 }
2741
2742 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2743 {
2744         struct bpf_raw_tp_link *raw_tp =
2745                 container_of(link, struct bpf_raw_tp_link, link);
2746
2747         kfree(raw_tp);
2748 }
2749
2750 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2751                                         struct seq_file *seq)
2752 {
2753         struct bpf_raw_tp_link *raw_tp_link =
2754                 container_of(link, struct bpf_raw_tp_link, link);
2755
2756         seq_printf(seq,
2757                    "tp_name:\t%s\n",
2758                    raw_tp_link->btp->tp->name);
2759 }
2760
2761 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2762                                           struct bpf_link_info *info)
2763 {
2764         struct bpf_raw_tp_link *raw_tp_link =
2765                 container_of(link, struct bpf_raw_tp_link, link);
2766         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2767         const char *tp_name = raw_tp_link->btp->tp->name;
2768         u32 ulen = info->raw_tracepoint.tp_name_len;
2769         size_t tp_len = strlen(tp_name);
2770
2771         if (!ulen ^ !ubuf)
2772                 return -EINVAL;
2773
2774         info->raw_tracepoint.tp_name_len = tp_len + 1;
2775
2776         if (!ubuf)
2777                 return 0;
2778
2779         if (ulen >= tp_len + 1) {
2780                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2781                         return -EFAULT;
2782         } else {
2783                 char zero = '\0';
2784
2785                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2786                         return -EFAULT;
2787                 if (put_user(zero, ubuf + ulen - 1))
2788                         return -EFAULT;
2789                 return -ENOSPC;
2790         }
2791
2792         return 0;
2793 }
2794
2795 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2796         .release = bpf_raw_tp_link_release,
2797         .dealloc = bpf_raw_tp_link_dealloc,
2798         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2799         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2800 };
2801
2802 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2803
2804 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2805 {
2806         struct bpf_link_primer link_primer;
2807         struct bpf_raw_tp_link *link;
2808         struct bpf_raw_event_map *btp;
2809         struct bpf_prog *prog;
2810         const char *tp_name;
2811         char buf[128];
2812         int err;
2813
2814         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2815                 return -EINVAL;
2816
2817         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2818         if (IS_ERR(prog))
2819                 return PTR_ERR(prog);
2820
2821         switch (prog->type) {
2822         case BPF_PROG_TYPE_TRACING:
2823         case BPF_PROG_TYPE_EXT:
2824         case BPF_PROG_TYPE_LSM:
2825                 if (attr->raw_tracepoint.name) {
2826                         /* The attach point for this category of programs
2827                          * should be specified via btf_id during program load.
2828                          */
2829                         err = -EINVAL;
2830                         goto out_put_prog;
2831                 }
2832                 if (prog->type == BPF_PROG_TYPE_TRACING &&
2833                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2834                         tp_name = prog->aux->attach_func_name;
2835                         break;
2836                 }
2837                 err = bpf_tracing_prog_attach(prog, 0, 0);
2838                 if (err >= 0)
2839                         return err;
2840                 goto out_put_prog;
2841         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2842         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2843                 if (strncpy_from_user(buf,
2844                                       u64_to_user_ptr(attr->raw_tracepoint.name),
2845                                       sizeof(buf) - 1) < 0) {
2846                         err = -EFAULT;
2847                         goto out_put_prog;
2848                 }
2849                 buf[sizeof(buf) - 1] = 0;
2850                 tp_name = buf;
2851                 break;
2852         default:
2853                 err = -EINVAL;
2854                 goto out_put_prog;
2855         }
2856
2857         btp = bpf_get_raw_tracepoint(tp_name);
2858         if (!btp) {
2859                 err = -ENOENT;
2860                 goto out_put_prog;
2861         }
2862
2863         link = kzalloc(sizeof(*link), GFP_USER);
2864         if (!link) {
2865                 err = -ENOMEM;
2866                 goto out_put_btp;
2867         }
2868         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2869                       &bpf_raw_tp_link_lops, prog);
2870         link->btp = btp;
2871
2872         err = bpf_link_prime(&link->link, &link_primer);
2873         if (err) {
2874                 kfree(link);
2875                 goto out_put_btp;
2876         }
2877
2878         err = bpf_probe_register(link->btp, prog);
2879         if (err) {
2880                 bpf_link_cleanup(&link_primer);
2881                 goto out_put_btp;
2882         }
2883
2884         return bpf_link_settle(&link_primer);
2885
2886 out_put_btp:
2887         bpf_put_raw_tracepoint(btp);
2888 out_put_prog:
2889         bpf_prog_put(prog);
2890         return err;
2891 }
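
/*
 * User-space sketch for BPF_RAW_TRACEPOINT_OPEN (illustrative): attach
 * a loaded BPF_PROG_TYPE_RAW_TRACEPOINT program to a tracepoint by
 * name. The returned fd represents the link; closing it detaches the
 * program.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int open_raw_tp(int prog_fd, const char *tp_name)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name    = (__u64)(unsigned long)tp_name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}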
2892
2893 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2894                                              enum bpf_attach_type attach_type)
2895 {
2896         switch (prog->type) {
2897         case BPF_PROG_TYPE_CGROUP_SOCK:
2898         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2899         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2900         case BPF_PROG_TYPE_SK_LOOKUP:
2901                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2902         case BPF_PROG_TYPE_CGROUP_SKB:
2903                 if (!capable(CAP_NET_ADMIN))
2904                         /* cg-skb progs can be loaded by unpriv user.
2905                          * check permissions at attach time.
2906                          */
2907                         return -EPERM;
2908                 return prog->enforce_expected_attach_type &&
2909                         prog->expected_attach_type != attach_type ?
2910                         -EINVAL : 0;
2911         default:
2912                 return 0;
2913         }
2914 }
2915
2916 static enum bpf_prog_type
2917 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2918 {
2919         switch (attach_type) {
2920         case BPF_CGROUP_INET_INGRESS:
2921         case BPF_CGROUP_INET_EGRESS:
2922                 return BPF_PROG_TYPE_CGROUP_SKB;
2923         case BPF_CGROUP_INET_SOCK_CREATE:
2924         case BPF_CGROUP_INET_SOCK_RELEASE:
2925         case BPF_CGROUP_INET4_POST_BIND:
2926         case BPF_CGROUP_INET6_POST_BIND:
2927                 return BPF_PROG_TYPE_CGROUP_SOCK;
2928         case BPF_CGROUP_INET4_BIND:
2929         case BPF_CGROUP_INET6_BIND:
2930         case BPF_CGROUP_INET4_CONNECT:
2931         case BPF_CGROUP_INET6_CONNECT:
2932         case BPF_CGROUP_INET4_GETPEERNAME:
2933         case BPF_CGROUP_INET6_GETPEERNAME:
2934         case BPF_CGROUP_INET4_GETSOCKNAME:
2935         case BPF_CGROUP_INET6_GETSOCKNAME:
2936         case BPF_CGROUP_UDP4_SENDMSG:
2937         case BPF_CGROUP_UDP6_SENDMSG:
2938         case BPF_CGROUP_UDP4_RECVMSG:
2939         case BPF_CGROUP_UDP6_RECVMSG:
2940                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2941         case BPF_CGROUP_SOCK_OPS:
2942                 return BPF_PROG_TYPE_SOCK_OPS;
2943         case BPF_CGROUP_DEVICE:
2944                 return BPF_PROG_TYPE_CGROUP_DEVICE;
2945         case BPF_SK_MSG_VERDICT:
2946                 return BPF_PROG_TYPE_SK_MSG;
2947         case BPF_SK_SKB_STREAM_PARSER:
2948         case BPF_SK_SKB_STREAM_VERDICT:
2949                 return BPF_PROG_TYPE_SK_SKB;
2950         case BPF_LIRC_MODE2:
2951                 return BPF_PROG_TYPE_LIRC_MODE2;
2952         case BPF_FLOW_DISSECTOR:
2953                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
2954         case BPF_CGROUP_SYSCTL:
2955                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
2956         case BPF_CGROUP_GETSOCKOPT:
2957         case BPF_CGROUP_SETSOCKOPT:
2958                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2959         case BPF_TRACE_ITER:
2960                 return BPF_PROG_TYPE_TRACING;
2961         case BPF_SK_LOOKUP:
2962                 return BPF_PROG_TYPE_SK_LOOKUP;
2963         case BPF_XDP:
2964                 return BPF_PROG_TYPE_XDP;
2965         default:
2966                 return BPF_PROG_TYPE_UNSPEC;
2967         }
2968 }
2969
2970 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2971
2972 #define BPF_F_ATTACH_MASK \
2973         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2974
2975 static int bpf_prog_attach(const union bpf_attr *attr)
2976 {
2977         enum bpf_prog_type ptype;
2978         struct bpf_prog *prog;
2979         int ret;
2980
2981         if (CHECK_ATTR(BPF_PROG_ATTACH))
2982                 return -EINVAL;
2983
2984         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2985                 return -EINVAL;
2986
2987         ptype = attach_type_to_prog_type(attr->attach_type);
2988         if (ptype == BPF_PROG_TYPE_UNSPEC)
2989                 return -EINVAL;
2990
2991         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2992         if (IS_ERR(prog))
2993                 return PTR_ERR(prog);
2994
2995         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2996                 bpf_prog_put(prog);
2997                 return -EINVAL;
2998         }
2999
3000         switch (ptype) {
3001         case BPF_PROG_TYPE_SK_SKB:
3002         case BPF_PROG_TYPE_SK_MSG:
3003                 ret = sock_map_get_from_fd(attr, prog);
3004                 break;
3005         case BPF_PROG_TYPE_LIRC_MODE2:
3006                 ret = lirc_prog_attach(attr, prog);
3007                 break;
3008         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3009                 ret = netns_bpf_prog_attach(attr, prog);
3010                 break;
3011         case BPF_PROG_TYPE_CGROUP_DEVICE:
3012         case BPF_PROG_TYPE_CGROUP_SKB:
3013         case BPF_PROG_TYPE_CGROUP_SOCK:
3014         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3015         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3016         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3017         case BPF_PROG_TYPE_SOCK_OPS:
3018                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3019                 break;
3020         default:
3021                 ret = -EINVAL;
3022         }
3023
3024         if (ret)
3025                 bpf_prog_put(prog);
3026         return ret;
3027 }
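
/*
 * User-space sketch for BPF_PROG_ATTACH (illustrative): attach a
 * cgroup-skb program to a cgroup directory fd for the ingress hook,
 * with flags restricted by BPF_F_ATTACH_MASK above.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int attach_cgroup_ingress(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd     = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags  = BPF_F_ALLOW_MULTI;

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}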
3028
3029 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3030
3031 static int bpf_prog_detach(const union bpf_attr *attr)
3032 {
3033         enum bpf_prog_type ptype;
3034
3035         if (CHECK_ATTR(BPF_PROG_DETACH))
3036                 return -EINVAL;
3037
3038         ptype = attach_type_to_prog_type(attr->attach_type);
3039
3040         switch (ptype) {
3041         case BPF_PROG_TYPE_SK_MSG:
3042         case BPF_PROG_TYPE_SK_SKB:
3043                 return sock_map_prog_detach(attr, ptype);
3044         case BPF_PROG_TYPE_LIRC_MODE2:
3045                 return lirc_prog_detach(attr);
3046         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3047                 return netns_bpf_prog_detach(attr, ptype);
3048         case BPF_PROG_TYPE_CGROUP_DEVICE:
3049         case BPF_PROG_TYPE_CGROUP_SKB:
3050         case BPF_PROG_TYPE_CGROUP_SOCK:
3051         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3052         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3053         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3054         case BPF_PROG_TYPE_SOCK_OPS:
3055                 return cgroup_bpf_prog_detach(attr, ptype);
3056         default:
3057                 return -EINVAL;
3058         }
3059 }
3060
3061 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3062
3063 static int bpf_prog_query(const union bpf_attr *attr,
3064                           union bpf_attr __user *uattr)
3065 {
3066         if (!capable(CAP_NET_ADMIN))
3067                 return -EPERM;
3068         if (CHECK_ATTR(BPF_PROG_QUERY))
3069                 return -EINVAL;
3070         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3071                 return -EINVAL;
3072
3073         switch (attr->query.attach_type) {
3074         case BPF_CGROUP_INET_INGRESS:
3075         case BPF_CGROUP_INET_EGRESS:
3076         case BPF_CGROUP_INET_SOCK_CREATE:
3077         case BPF_CGROUP_INET_SOCK_RELEASE:
3078         case BPF_CGROUP_INET4_BIND:
3079         case BPF_CGROUP_INET6_BIND:
3080         case BPF_CGROUP_INET4_POST_BIND:
3081         case BPF_CGROUP_INET6_POST_BIND:
3082         case BPF_CGROUP_INET4_CONNECT:
3083         case BPF_CGROUP_INET6_CONNECT:
3084         case BPF_CGROUP_INET4_GETPEERNAME:
3085         case BPF_CGROUP_INET6_GETPEERNAME:
3086         case BPF_CGROUP_INET4_GETSOCKNAME:
3087         case BPF_CGROUP_INET6_GETSOCKNAME:
3088         case BPF_CGROUP_UDP4_SENDMSG:
3089         case BPF_CGROUP_UDP6_SENDMSG:
3090         case BPF_CGROUP_UDP4_RECVMSG:
3091         case BPF_CGROUP_UDP6_RECVMSG:
3092         case BPF_CGROUP_SOCK_OPS:
3093         case BPF_CGROUP_DEVICE:
3094         case BPF_CGROUP_SYSCTL:
3095         case BPF_CGROUP_GETSOCKOPT:
3096         case BPF_CGROUP_SETSOCKOPT:
3097                 return cgroup_bpf_prog_query(attr, uattr);
3098         case BPF_LIRC_MODE2:
3099                 return lirc_prog_query(attr, uattr);
3100         case BPF_FLOW_DISSECTOR:
3101         case BPF_SK_LOOKUP:
3102                 return netns_bpf_prog_query(attr, uattr);
3103         default:
3104                 return -EINVAL;
3105         }
3106 }
3107
3108 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3109
3110 static int bpf_prog_test_run(const union bpf_attr *attr,
3111                              union bpf_attr __user *uattr)
3112 {
3113         struct bpf_prog *prog;
3114         int ret = -ENOTSUPP;
3115
3116         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3117                 return -EINVAL;
3118
3119         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3120             (!attr->test.ctx_size_in && attr->test.ctx_in))
3121                 return -EINVAL;
3122
3123         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3124             (!attr->test.ctx_size_out && attr->test.ctx_out))
3125                 return -EINVAL;
3126
3127         prog = bpf_prog_get(attr->test.prog_fd);
3128         if (IS_ERR(prog))
3129                 return PTR_ERR(prog);
3130
3131         if (prog->aux->ops->test_run)
3132                 ret = prog->aux->ops->test_run(prog, attr, uattr);
3133
3134         bpf_prog_put(prog);
3135         return ret;
3136 }
3137
3138 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3139
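/* Common helper for the *_GET_NEXT_ID commands: find the smallest ID in
 * @idr that is strictly greater than attr->start_id and report it via
 * uattr->next_id.
 */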
3140 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3141                                union bpf_attr __user *uattr,
3142                                struct idr *idr,
3143                                spinlock_t *lock)
3144 {
3145         u32 next_id = attr->start_id;
3146         int err = 0;
3147
3148         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3149                 return -EINVAL;
3150
3151         if (!capable(CAP_SYS_ADMIN))
3152                 return -EPERM;
3153
3154         next_id++;
3155         spin_lock_bh(lock);
3156         if (!idr_get_next(idr, &next_id))
3157                 err = -ENOENT;
3158         spin_unlock_bh(lock);
3159
3160         if (!err)
3161                 err = put_user(next_id, &uattr->next_id);
3162
3163         return err;
3164 }
3165
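/* Return the map with the smallest ID >= *id, taking a reference on it.
 * Maps whose refcount has already dropped to zero are skipped.
 */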
3166 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3167 {
3168         struct bpf_map *map;
3169
3170         spin_lock_bh(&map_idr_lock);
3171 again:
3172         map = idr_get_next(&map_idr, id);
3173         if (map) {
3174                 map = __bpf_map_inc_not_zero(map, false);
3175                 if (IS_ERR(map)) {
3176                         (*id)++;
3177                         goto again;
3178                 }
3179         }
3180         spin_unlock_bh(&map_idr_lock);
3181
3182         return map;
3183 }
3184
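/* Program counterpart of bpf_map_get_curr_or_next(): return the program
 * with the smallest ID >= *id, taking a reference on it.
 */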
3185 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3186 {
3187         struct bpf_prog *prog;
3188
3189         spin_lock_bh(&prog_idr_lock);
3190 again:
3191         prog = idr_get_next(&prog_idr, id);
3192         if (prog) {
3193                 prog = bpf_prog_inc_not_zero(prog);
3194                 if (IS_ERR(prog)) {
3195                         (*id)++;
3196                         goto again;
3197                 }
3198         }
3199         spin_unlock_bh(&prog_idr_lock);
3200
3201         return prog;
3202 }
3203
3204 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3205
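/* Look up a program by ID and take a reference on it, unless it is
 * already being destroyed.
 */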
3206 struct bpf_prog *bpf_prog_by_id(u32 id)
3207 {
3208         struct bpf_prog *prog;
3209
3210         if (!id)
3211                 return ERR_PTR(-ENOENT);
3212
3213         spin_lock_bh(&prog_idr_lock);
3214         prog = idr_find(&prog_idr, id);
3215         if (prog)
3216                 prog = bpf_prog_inc_not_zero(prog);
3217         else
3218                 prog = ERR_PTR(-ENOENT);
3219         spin_unlock_bh(&prog_idr_lock);
3220         return prog;
3221 }
3222
3223 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3224 {
3225         struct bpf_prog *prog;
3226         u32 id = attr->prog_id;
3227         int fd;
3228
3229         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3230                 return -EINVAL;
3231
3232         if (!capable(CAP_SYS_ADMIN))
3233                 return -EPERM;
3234
3235         prog = bpf_prog_by_id(id);
3236         if (IS_ERR(prog))
3237                 return PTR_ERR(prog);
3238
3239         fd = bpf_prog_new_fd(prog);
3240         if (fd < 0)
3241                 bpf_prog_put(prog);
3242
3243         return fd;
3244 }
3245
3246 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3247
3248 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3249 {
3250         struct bpf_map *map;
3251         u32 id = attr->map_id;
3252         int f_flags;
3253         int fd;
3254
3255         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3256             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3257                 return -EINVAL;
3258
3259         if (!capable(CAP_SYS_ADMIN))
3260                 return -EPERM;
3261
3262         f_flags = bpf_get_file_flag(attr->open_flags);
3263         if (f_flags < 0)
3264                 return f_flags;
3265
3266         spin_lock_bh(&map_idr_lock);
3267         map = idr_find(&map_idr, id);
3268         if (map)
3269                 map = __bpf_map_inc_not_zero(map, true);
3270         else
3271                 map = ERR_PTR(-ENOENT);
3272         spin_unlock_bh(&map_idr_lock);
3273
3274         if (IS_ERR(map))
3275                 return PTR_ERR(map);
3276
3277         fd = bpf_map_new_fd(map, f_flags);
3278         if (fd < 0)
3279                 bpf_map_put_with_uref(map);
3280
3281         return fd;
3282 }
3283
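/* Given an address embedded in a BPF_LD_IMM64 instruction, find the map
 * (and, for direct value access, the offset into its value area) that
 * the address points into.  Returns NULL if the address belongs to none
 * of the program's used maps.
 */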
3284 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3285                                               unsigned long addr, u32 *off,
3286                                               u32 *type)
3287 {
3288         const struct bpf_map *map;
3289         int i;
3290
3291         mutex_lock(&prog->aux->used_maps_mutex);
3292         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3293                 map = prog->aux->used_maps[i];
3294                 if (map == (void *)addr) {
3295                         *type = BPF_PSEUDO_MAP_FD;
3296                         goto out;
3297                 }
3298                 if (!map->ops->map_direct_value_meta)
3299                         continue;
3300                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3301                         *type = BPF_PSEUDO_MAP_VALUE;
3302                         goto out;
3303                 }
3304         }
3305         map = NULL;
3306
3307 out:
3308         mutex_unlock(&prog->aux->used_maps_mutex);
3309         return map;
3310 }
3311
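/* Make a copy of the program's instructions that is safe to show to user
 * space: undo internal rewrites (tail calls, BPF_CALL_ARGS, BPF_PROBE_MEM)
 * and replace in-kernel map addresses in LD_IMM64 with map IDs so that no
 * kernel pointers are leaked.
 */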
3312 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3313                                               const struct cred *f_cred)
3314 {
3315         const struct bpf_map *map;
3316         struct bpf_insn *insns;
3317         u32 off, type;
3318         u64 imm;
3319         u8 code;
3320         int i;
3321
3322         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3323                         GFP_USER);
3324         if (!insns)
3325                 return insns;
3326
3327         for (i = 0; i < prog->len; i++) {
3328                 code = insns[i].code;
3329
3330                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3331                         insns[i].code = BPF_JMP | BPF_CALL;
3332                         insns[i].imm = BPF_FUNC_tail_call;
3333                         /* fall-through */
3334                 }
3335                 if (code == (BPF_JMP | BPF_CALL) ||
3336                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3337                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3338                                 insns[i].code = BPF_JMP | BPF_CALL;
3339                         if (!bpf_dump_raw_ok(f_cred))
3340                                 insns[i].imm = 0;
3341                         continue;
3342                 }
3343                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3344                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3345                         continue;
3346                 }
3347
3348                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3349                         continue;
3350
3351                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3352                 map = bpf_map_from_imm(prog, imm, &off, &type);
3353                 if (map) {
3354                         insns[i].src_reg = type;
3355                         insns[i].imm = map->id;
3356                         insns[i + 1].imm = off;
3357                         continue;
3358                 }
3359         }
3360
3361         return insns;
3362 }
3363
3364 static int set_info_rec_size(struct bpf_prog_info *info)
3365 {
3366         /*
3367          * Ensure each info.*_rec_size matches the record size the
3368          * kernel expects.
3369          *
3370          * As the only exception, a zero *_rec_size is allowed when
3371          * the corresponding _cnt is also zero.  In that case, the
3372          * kernel will write the expected _rec_size back to the
3373          * info.
3374          */
3375
3376         if ((info->nr_func_info || info->func_info_rec_size) &&
3377             info->func_info_rec_size != sizeof(struct bpf_func_info))
3378                 return -EINVAL;
3379
3380         if ((info->nr_line_info || info->line_info_rec_size) &&
3381             info->line_info_rec_size != sizeof(struct bpf_line_info))
3382                 return -EINVAL;
3383
3384         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3385             info->jited_line_info_rec_size != sizeof(__u64))
3386                 return -EINVAL;
3387
3388         info->func_info_rec_size = sizeof(struct bpf_func_info);
3389         info->line_info_rec_size = sizeof(struct bpf_line_info);
3390         info->jited_line_info_rec_size = sizeof(__u64);
3391
3392         return 0;
3393 }
3394
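/* Fill a bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  Fields that could
 * expose kernel addresses (xlated/jited images, ksyms, jited line info)
 * are only reported to sufficiently privileged callers.
 */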
3395 static int bpf_prog_get_info_by_fd(struct file *file,
3396                                    struct bpf_prog *prog,
3397                                    const union bpf_attr *attr,
3398                                    union bpf_attr __user *uattr)
3399 {
3400         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3401         struct bpf_prog_info info;
3402         u32 info_len = attr->info.info_len;
3403         struct bpf_prog_stats stats;
3404         char __user *uinsns;
3405         u32 ulen;
3406         int err;
3407
3408         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3409         if (err)
3410                 return err;
3411         info_len = min_t(u32, sizeof(info), info_len);
3412
3413         memset(&info, 0, sizeof(info));
3414         if (copy_from_user(&info, uinfo, info_len))
3415                 return -EFAULT;
3416
3417         info.type = prog->type;
3418         info.id = prog->aux->id;
3419         info.load_time = prog->aux->load_time;
3420         info.created_by_uid = from_kuid_munged(current_user_ns(),
3421                                                prog->aux->user->uid);
3422         info.gpl_compatible = prog->gpl_compatible;
3423
3424         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3425         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3426
3427         mutex_lock(&prog->aux->used_maps_mutex);
3428         ulen = info.nr_map_ids;
3429         info.nr_map_ids = prog->aux->used_map_cnt;
3430         ulen = min_t(u32, info.nr_map_ids, ulen);
3431         if (ulen) {
3432                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3433                 u32 i;
3434
3435                 for (i = 0; i < ulen; i++)
3436                         if (put_user(prog->aux->used_maps[i]->id,
3437                                      &user_map_ids[i])) {
3438                                 mutex_unlock(&prog->aux->used_maps_mutex);
3439                                 return -EFAULT;
3440                         }
3441         }
3442         mutex_unlock(&prog->aux->used_maps_mutex);
3443
3444         err = set_info_rec_size(&info);
3445         if (err)
3446                 return err;
3447
3448         bpf_prog_get_stats(prog, &stats);
3449         info.run_time_ns = stats.nsecs;
3450         info.run_cnt = stats.cnt;
3451         info.recursion_misses = stats.misses;
3452
3453         if (!bpf_capable()) {
3454                 info.jited_prog_len = 0;
3455                 info.xlated_prog_len = 0;
3456                 info.nr_jited_ksyms = 0;
3457                 info.nr_jited_func_lens = 0;
3458                 info.nr_func_info = 0;
3459                 info.nr_line_info = 0;
3460                 info.nr_jited_line_info = 0;
3461                 goto done;
3462         }
3463
3464         ulen = info.xlated_prog_len;
3465         info.xlated_prog_len = bpf_prog_insn_size(prog);
3466         if (info.xlated_prog_len && ulen) {
3467                 struct bpf_insn *insns_sanitized;
3468                 bool fault;
3469
3470                 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3471                         info.xlated_prog_insns = 0;
3472                         goto done;
3473                 }
3474                 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3475                 if (!insns_sanitized)
3476                         return -ENOMEM;
3477                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3478                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3479                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3480                 kfree(insns_sanitized);
3481                 if (fault)
3482                         return -EFAULT;
3483         }
3484
3485         if (bpf_prog_is_dev_bound(prog->aux)) {
3486                 err = bpf_prog_offload_info_fill(&info, prog);
3487                 if (err)
3488                         return err;
3489                 goto done;
3490         }
3491
3492         /* NOTE: the code below is only reached for programs that are not
3493          * device-bound; for offloaded programs the corresponding fields
3494          * are filled by bpf_prog_offload_info_fill() above.
3495          */
3496         ulen = info.jited_prog_len;
3497         if (prog->aux->func_cnt) {
3498                 u32 i;
3499
3500                 info.jited_prog_len = 0;
3501                 for (i = 0; i < prog->aux->func_cnt; i++)
3502                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3503         } else {
3504                 info.jited_prog_len = prog->jited_len;
3505         }
3506
3507         if (info.jited_prog_len && ulen) {
3508                 if (bpf_dump_raw_ok(file->f_cred)) {
3509                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3510                         ulen = min_t(u32, info.jited_prog_len, ulen);
3511
3512                         /* for multi-function programs, copy the JITed
3513                          * instructions for all the functions
3514                          */
3515                         if (prog->aux->func_cnt) {
3516                                 u32 len, free, i;
3517                                 u8 *img;
3518
3519                                 free = ulen;
3520                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3521                                         len = prog->aux->func[i]->jited_len;
3522                                         len = min_t(u32, len, free);
3523                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3524                                         if (copy_to_user(uinsns, img, len))
3525                                                 return -EFAULT;
3526                                         uinsns += len;
3527                                         free -= len;
3528                                         if (!free)
3529                                                 break;
3530                                 }
3531                         } else {
3532                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3533                                         return -EFAULT;
3534                         }
3535                 } else {
3536                         info.jited_prog_insns = 0;
3537                 }
3538         }
3539
3540         ulen = info.nr_jited_ksyms;
3541         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3542         if (ulen) {
3543                 if (bpf_dump_raw_ok(file->f_cred)) {
3544                         unsigned long ksym_addr;
3545                         u64 __user *user_ksyms;
3546                         u32 i;
3547
3548                         /* copy the address of the kernel symbol
3549                          * corresponding to each function
3550                          */
3551                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3552                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3553                         if (prog->aux->func_cnt) {
3554                                 for (i = 0; i < ulen; i++) {
3555                                         ksym_addr = (unsigned long)
3556                                                 prog->aux->func[i]->bpf_func;
3557                                         if (put_user((u64) ksym_addr,
3558                                                      &user_ksyms[i]))
3559                                                 return -EFAULT;
3560                                 }
3561                         } else {
3562                                 ksym_addr = (unsigned long) prog->bpf_func;
3563                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3564                                         return -EFAULT;
3565                         }
3566                 } else {
3567                         info.jited_ksyms = 0;
3568                 }
3569         }
3570
3571         ulen = info.nr_jited_func_lens;
3572         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3573         if (ulen) {
3574                 if (bpf_dump_raw_ok(file->f_cred)) {
3575                         u32 __user *user_lens;
3576                         u32 func_len, i;
3577
3578                         /* copy the JITed image lengths for each function */
3579                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3580                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3581                         if (prog->aux->func_cnt) {
3582                                 for (i = 0; i < ulen; i++) {
3583                                         func_len =
3584                                                 prog->aux->func[i]->jited_len;
3585                                         if (put_user(func_len, &user_lens[i]))
3586                                                 return -EFAULT;
3587                                 }
3588                         } else {
3589                                 func_len = prog->jited_len;
3590                                 if (put_user(func_len, &user_lens[0]))
3591                                         return -EFAULT;
3592                         }
3593                 } else {
3594                         info.jited_func_lens = 0;
3595                 }
3596         }
3597
3598         if (prog->aux->btf)
3599                 info.btf_id = btf_obj_id(prog->aux->btf);
3600
3601         ulen = info.nr_func_info;
3602         info.nr_func_info = prog->aux->func_info_cnt;
3603         if (info.nr_func_info && ulen) {
3604                 char __user *user_finfo;
3605
3606                 user_finfo = u64_to_user_ptr(info.func_info);
3607                 ulen = min_t(u32, info.nr_func_info, ulen);
3608                 if (copy_to_user(user_finfo, prog->aux->func_info,
3609                                  info.func_info_rec_size * ulen))
3610                         return -EFAULT;
3611         }
3612
3613         ulen = info.nr_line_info;
3614         info.nr_line_info = prog->aux->nr_linfo;
3615         if (info.nr_line_info && ulen) {
3616                 __u8 __user *user_linfo;
3617
3618                 user_linfo = u64_to_user_ptr(info.line_info);
3619                 ulen = min_t(u32, info.nr_line_info, ulen);
3620                 if (copy_to_user(user_linfo, prog->aux->linfo,
3621                                  info.line_info_rec_size * ulen))
3622                         return -EFAULT;
3623         }
3624
3625         ulen = info.nr_jited_line_info;
3626         if (prog->aux->jited_linfo)
3627                 info.nr_jited_line_info = prog->aux->nr_linfo;
3628         else
3629                 info.nr_jited_line_info = 0;
3630         if (info.nr_jited_line_info && ulen) {
3631                 if (bpf_dump_raw_ok(file->f_cred)) {
3632                         __u64 __user *user_linfo;
3633                         u32 i;
3634
3635                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3636                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3637                         for (i = 0; i < ulen; i++) {
3638                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3639                                              &user_linfo[i]))
3640                                         return -EFAULT;
3641                         }
3642                 } else {
3643                         info.jited_line_info = 0;
3644                 }
3645         }
3646
3647         ulen = info.nr_prog_tags;
3648         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3649         if (ulen) {
3650                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3651                 u32 i;
3652
3653                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3654                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3655                 if (prog->aux->func_cnt) {
3656                         for (i = 0; i < ulen; i++) {
3657                                 if (copy_to_user(user_prog_tags[i],
3658                                                  prog->aux->func[i]->tag,
3659                                                  BPF_TAG_SIZE))
3660                                         return -EFAULT;
3661                         }
3662                 } else {
3663                         if (copy_to_user(user_prog_tags[0],
3664                                          prog->tag, BPF_TAG_SIZE))
3665                                 return -EFAULT;
3666                 }
3667         }
3668
3669 done:
3670         if (copy_to_user(uinfo, &info, info_len) ||
3671             put_user(info_len, &uattr->info.info_len))
3672                 return -EFAULT;
3673
3674         return 0;
3675 }
3676
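/* Fill a bpf_map_info for BPF_OBJ_GET_INFO_BY_FD. */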
3677 static int bpf_map_get_info_by_fd(struct file *file,
3678                                   struct bpf_map *map,
3679                                   const union bpf_attr *attr,
3680                                   union bpf_attr __user *uattr)
3681 {
3682         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3683         struct bpf_map_info info;
3684         u32 info_len = attr->info.info_len;
3685         int err;
3686
3687         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3688         if (err)
3689                 return err;
3690         info_len = min_t(u32, sizeof(info), info_len);
3691
3692         memset(&info, 0, sizeof(info));
3693         info.type = map->map_type;
3694         info.id = map->id;
3695         info.key_size = map->key_size;
3696         info.value_size = map->value_size;
3697         info.max_entries = map->max_entries;
3698         info.map_flags = map->map_flags;
3699         memcpy(info.name, map->name, sizeof(map->name));
3700
3701         if (map->btf) {
3702                 info.btf_id = btf_obj_id(map->btf);
3703                 info.btf_key_type_id = map->btf_key_type_id;
3704                 info.btf_value_type_id = map->btf_value_type_id;
3705         }
3706         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3707
3708         if (bpf_map_is_dev_bound(map)) {
3709                 err = bpf_map_offload_info_fill(&info, map);
3710                 if (err)
3711                         return err;
3712         }
3713
3714         if (copy_to_user(uinfo, &info, info_len) ||
3715             put_user(info_len, &uattr->info.info_len))
3716                 return -EFAULT;
3717
3718         return 0;
3719 }
3720
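/* Fill a bpf_btf_info for BPF_OBJ_GET_INFO_BY_FD; the actual work is
 * done by btf_get_info_by_fd().
 */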
3721 static int bpf_btf_get_info_by_fd(struct file *file,
3722                                   struct btf *btf,
3723                                   const union bpf_attr *attr,
3724                                   union bpf_attr __user *uattr)
3725 {
3726         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3727         u32 info_len = attr->info.info_len;
3728         int err;
3729
3730         err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3731         if (err)
3732                 return err;
3733
3734         return btf_get_info_by_fd(btf, attr, uattr);
3735 }
3736
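/* Fill a bpf_link_info for BPF_OBJ_GET_INFO_BY_FD, letting the link
 * type add its own details through ->fill_link_info.
 */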
3737 static int bpf_link_get_info_by_fd(struct file *file,
3738                                    struct bpf_link *link,
3739                                    const union bpf_attr *attr,
3740                                    union bpf_attr __user *uattr)
3741 {
3742         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3743         struct bpf_link_info info;
3744         u32 info_len = attr->info.info_len;
3745         int err;
3746
3747         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3748         if (err)
3749                 return err;
3750         info_len = min_t(u32, sizeof(info), info_len);
3751
3752         memset(&info, 0, sizeof(info));
3753         if (copy_from_user(&info, uinfo, info_len))
3754                 return -EFAULT;
3755
3756         info.type = link->type;
3757         info.id = link->id;
3758         info.prog_id = link->prog->aux->id;
3759
3760         if (link->ops->fill_link_info) {
3761                 err = link->ops->fill_link_info(link, &info);
3762                 if (err)
3763                         return err;
3764         }
3765
3766         if (copy_to_user(uinfo, &info, info_len) ||
3767             put_user(info_len, &uattr->info.info_len))
3768                 return -EFAULT;
3769
3770         return 0;
3771 }
3772
3774 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3775
3776 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3777                                   union bpf_attr __user *uattr)
3778 {
3779         int ufd = attr->info.bpf_fd;
3780         struct fd f;
3781         int err;
3782
3783         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3784                 return -EINVAL;
3785
3786         f = fdget(ufd);
3787         if (!f.file)
3788                 return -EBADFD;
3789
3790         if (f.file->f_op == &bpf_prog_fops)
3791                 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3792                                               uattr);
3793         else if (f.file->f_op == &bpf_map_fops)
3794                 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3795                                              uattr);
3796         else if (f.file->f_op == &btf_fops)
3797                 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3798         else if (f.file->f_op == &bpf_link_fops)
3799                 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3800                                               attr, uattr);
3801         else
3802                 err = -EINVAL;
3803
3804         fdput(f);
3805         return err;
3806 }
3807
3808 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3809
3810 static int bpf_btf_load(const union bpf_attr *attr)
3811 {
3812         if (CHECK_ATTR(BPF_BTF_LOAD))
3813                 return -EINVAL;
3814
3815         if (!bpf_capable())
3816                 return -EPERM;
3817
3818         return btf_new_fd(attr);
3819 }
3820
3821 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3822
3823 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3824 {
3825         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3826                 return -EINVAL;
3827
3828         if (!capable(CAP_SYS_ADMIN))
3829                 return -EPERM;
3830
3831         return btf_get_fd_by_id(attr->btf_id);
3832 }
3833
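/* Copy the result of a task_fd_query back to user space.  If the user
 * buffer is too small for the name in @buf, copy what fits,
 * NUL-terminate it and return -ENOSPC.
 */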
3834 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3835                                   union bpf_attr __user *uattr,
3836                                   u32 prog_id, u32 fd_type,
3837                                   const char *buf, u64 probe_offset,
3838                                   u64 probe_addr)
3839 {
3840         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3841         u32 len = buf ? strlen(buf) : 0, input_len;
3842         int err = 0;
3843
3844         if (put_user(len, &uattr->task_fd_query.buf_len))
3845                 return -EFAULT;
3846         input_len = attr->task_fd_query.buf_len;
3847         if (input_len && ubuf) {
3848                 if (!len) {
3849                         /* nothing to copy; just NUL-terminate ubuf */
3850                         char zero = '\0';
3851
3852                         if (put_user(zero, ubuf))
3853                                 return -EFAULT;
3854                 } else if (input_len >= len + 1) {
3855                         /* ubuf can hold the whole string plus its NUL terminator */
3856                         if (copy_to_user(ubuf, buf, len + 1))
3857                                 return -EFAULT;
3858                 } else {
3859                         /* ubuf cannot hold the whole string; copy what
3860                          * fits and NUL-terminate it.
3861                          */
3862                         char zero = '\0';
3863
3864                         err = -ENOSPC;
3865                         if (copy_to_user(ubuf, buf, input_len - 1))
3866                                 return -EFAULT;
3867                         if (put_user(zero, ubuf + input_len - 1))
3868                                 return -EFAULT;
3869                 }
3870         }
3871
3872         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3873             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3874             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3875             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3876                 return -EFAULT;
3877
3878         return err;
3879 }
3880
3881 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3882
3883 static int bpf_task_fd_query(const union bpf_attr *attr,
3884                              union bpf_attr __user *uattr)
3885 {
3886         pid_t pid = attr->task_fd_query.pid;
3887         u32 fd = attr->task_fd_query.fd;
3888         const struct perf_event *event;
3889         struct task_struct *task;
3890         struct file *file;
3891         int err;
3892
3893         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3894                 return -EINVAL;
3895
3896         if (!capable(CAP_SYS_ADMIN))
3897                 return -EPERM;
3898
3899         if (attr->task_fd_query.flags != 0)
3900                 return -EINVAL;
3901
3902         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3903         if (!task)
3904                 return -ENOENT;
3905
3907         file = fget_task(task, fd);
3908         put_task_struct(task);
3909         if (!file)
3910                 return -EBADF;
3911
3912         if (file->f_op == &bpf_link_fops) {
3913                 struct bpf_link *link = file->private_data;
3914
3915                 if (link->ops == &bpf_raw_tp_link_lops) {
3916                         struct bpf_raw_tp_link *raw_tp =
3917                                 container_of(link, struct bpf_raw_tp_link, link);
3918                         struct bpf_raw_event_map *btp = raw_tp->btp;
3919
3920                         err = bpf_task_fd_query_copy(attr, uattr,
3921                                                      raw_tp->link.prog->aux->id,
3922                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
3923                                                      btp->tp->name, 0, 0);
3924                         goto put_file;
3925                 }
3926                 goto out_not_supp;
3927         }
3928
3929         event = perf_get_event(file);
3930         if (!IS_ERR(event)) {
3931                 u64 probe_offset, probe_addr;
3932                 u32 prog_id, fd_type;
3933                 const char *buf;
3934
3935                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3936                                               &buf, &probe_offset,
3937                                               &probe_addr);
3938                 if (!err)
3939                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3940                                                      fd_type, buf,
3941                                                      probe_offset,
3942                                                      probe_addr);
3943                 goto put_file;
3944         }
3945
3946 out_not_supp:
3947         err = -ENOTSUPP;
3948 put_file:
3949         fput(file);
3950         return err;
3951 }
3952
3953 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3954
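/* Invoke a batch op if the map implements it, else fail with -ENOTSUPP.
 * Relies on map, attr, uattr, err and the err_put label in the caller.
 */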
3955 #define BPF_DO_BATCH(fn)                        \
3956         do {                                    \
3957                 if (!fn) {                      \
3958                         err = -ENOTSUPP;        \
3959                         goto err_put;           \
3960                 }                               \
3961                 err = fn(map, attr, uattr);     \
3962         } while (0)
3963
3964 static int bpf_map_do_batch(const union bpf_attr *attr,
3965                             union bpf_attr __user *uattr,
3966                             int cmd)
3967 {
3968         struct bpf_map *map;
3969         int err, ufd;
3970         struct fd f;
3971
3972         if (CHECK_ATTR(BPF_MAP_BATCH))
3973                 return -EINVAL;
3974
3975         ufd = attr->batch.map_fd;
3976         f = fdget(ufd);
3977         map = __bpf_map_get(f);
3978         if (IS_ERR(map))
3979                 return PTR_ERR(map);
3980
3981         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3982              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3983             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3984                 err = -EPERM;
3985                 goto err_put;
3986         }
3987
3988         if (cmd != BPF_MAP_LOOKUP_BATCH &&
3989             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3990                 err = -EPERM;
3991                 goto err_put;
3992         }
3993
3994         if (cmd == BPF_MAP_LOOKUP_BATCH)
3995                 BPF_DO_BATCH(map->ops->map_lookup_batch);
3996         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3997                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3998         else if (cmd == BPF_MAP_UPDATE_BATCH)
3999                 BPF_DO_BATCH(map->ops->map_update_batch);
4000         else
4001                 BPF_DO_BATCH(map->ops->map_delete_batch);
4002
4003 err_put:
4004         fdput(f);
4005         return err;
4006 }
4007
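/* Create a link for tracing-flavored programs: BPF_TRACE_ITER programs
 * attach through an iterator link, while BPF_PROG_TYPE_EXT (freplace)
 * programs attach to their target through a tracing link.
 */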
4008 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4009 {
4010         if (attr->link_create.attach_type != prog->expected_attach_type)
4011                 return -EINVAL;
4012
4013         if (prog->expected_attach_type == BPF_TRACE_ITER)
4014                 return bpf_iter_link_attach(attr, prog);
4015         else if (prog->type == BPF_PROG_TYPE_EXT)
4016                 return bpf_tracing_prog_attach(prog,
4017                                                attr->link_create.target_fd,
4018                                                attr->link_create.target_btf_id);
4019         return -EINVAL;
4020 }
4021
4022 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
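
/* BPF_LINK_CREATE: attach @prog as dictated by its type and attach_type,
 * wrapping the attachment in a bpf_link that owns the program reference
 * from then on.
 */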
4023 static int link_create(union bpf_attr *attr)
4024 {
4025         enum bpf_prog_type ptype;
4026         struct bpf_prog *prog;
4027         int ret;
4028
4029         if (CHECK_ATTR(BPF_LINK_CREATE))
4030                 return -EINVAL;
4031
4032         prog = bpf_prog_get(attr->link_create.prog_fd);
4033         if (IS_ERR(prog))
4034                 return PTR_ERR(prog);
4035
4036         ret = bpf_prog_attach_check_attach_type(prog,
4037                                                 attr->link_create.attach_type);
4038         if (ret)
4039                 goto out;
4040
4041         if (prog->type == BPF_PROG_TYPE_EXT) {
4042                 ret = tracing_bpf_link_attach(attr, prog);
4043                 goto out;
4044         }
4045
4046         ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4047         if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4048                 ret = -EINVAL;
4049                 goto out;
4050         }
4051
4052         switch (ptype) {
4053         case BPF_PROG_TYPE_CGROUP_SKB:
4054         case BPF_PROG_TYPE_CGROUP_SOCK:
4055         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4056         case BPF_PROG_TYPE_SOCK_OPS:
4057         case BPF_PROG_TYPE_CGROUP_DEVICE:
4058         case BPF_PROG_TYPE_CGROUP_SYSCTL:
4059         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4060                 ret = cgroup_bpf_link_attach(attr, prog);
4061                 break;
4062         case BPF_PROG_TYPE_TRACING:
4063                 ret = tracing_bpf_link_attach(attr, prog);
4064                 break;
4065         case BPF_PROG_TYPE_FLOW_DISSECTOR:
4066         case BPF_PROG_TYPE_SK_LOOKUP:
4067                 ret = netns_bpf_link_create(attr, prog);
4068                 break;
4069 #ifdef CONFIG_NET
4070         case BPF_PROG_TYPE_XDP:
4071                 ret = bpf_xdp_link_attach(attr, prog);
4072                 break;
4073 #endif
4074         default:
4075                 ret = -EINVAL;
4076         }
4077
4078 out:
4079         if (ret < 0)
4080                 bpf_prog_put(prog);
4081         return ret;
4082 }
4083
4084 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4085
4086 static int link_update(union bpf_attr *attr)
4087 {
4088         struct bpf_prog *old_prog = NULL, *new_prog;
4089         struct bpf_link *link;
4090         u32 flags;
4091         int ret;
4092
4093         if (CHECK_ATTR(BPF_LINK_UPDATE))
4094                 return -EINVAL;
4095
4096         flags = attr->link_update.flags;
4097         if (flags & ~BPF_F_REPLACE)
4098                 return -EINVAL;
4099
4100         link = bpf_link_get_from_fd(attr->link_update.link_fd);
4101         if (IS_ERR(link))
4102                 return PTR_ERR(link);
4103
4104         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4105         if (IS_ERR(new_prog)) {
4106                 ret = PTR_ERR(new_prog);
4107                 goto out_put_link;
4108         }
4109
4110         if (flags & BPF_F_REPLACE) {
4111                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4112                 if (IS_ERR(old_prog)) {
4113                         ret = PTR_ERR(old_prog);
4114                         old_prog = NULL;
4115                         goto out_put_progs;
4116                 }
4117         } else if (attr->link_update.old_prog_fd) {
4118                 ret = -EINVAL;
4119                 goto out_put_progs;
4120         }
4121
4122         if (link->ops->update_prog)
4123                 ret = link->ops->update_prog(link, new_prog, old_prog);
4124         else
4125                 ret = -EINVAL;
4126
4127 out_put_progs:
4128         if (old_prog)
4129                 bpf_prog_put(old_prog);
4130         if (ret)
4131                 bpf_prog_put(new_prog);
4132 out_put_link:
4133         bpf_link_put(link);
4134         return ret;
4135 }
4136
4137 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4138
4139 static int link_detach(union bpf_attr *attr)
4140 {
4141         struct bpf_link *link;
4142         int ret;
4143
4144         if (CHECK_ATTR(BPF_LINK_DETACH))
4145                 return -EINVAL;
4146
4147         link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4148         if (IS_ERR(link))
4149                 return PTR_ERR(link);
4150
4151         if (link->ops->detach)
4152                 ret = link->ops->detach(link);
4153         else
4154                 ret = -EOPNOTSUPP;
4155
4156         bpf_link_put(link);
4157         return ret;
4158 }
4159
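/* Take a reference on @link unless its refcount has already hit zero. */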
4160 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4161 {
4162         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4163 }
4164
4165 struct bpf_link *bpf_link_by_id(u32 id)
4166 {
4167         struct bpf_link *link;
4168
4169         if (!id)
4170                 return ERR_PTR(-ENOENT);
4171
4172         spin_lock_bh(&link_idr_lock);
4173         /* before link is "settled", ID is 0, pretend it doesn't exist yet */
4174         link = idr_find(&link_idr, id);
4175         if (link) {
4176                 if (link->id)
4177                         link = bpf_link_inc_not_zero(link);
4178                 else
4179                         link = ERR_PTR(-EAGAIN);
4180         } else {
4181                 link = ERR_PTR(-ENOENT);
4182         }
4183         spin_unlock_bh(&link_idr_lock);
4184         return link;
4185 }
4186
4187 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4188
4189 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4190 {
4191         struct bpf_link *link;
4192         u32 id = attr->link_id;
4193         int fd;
4194
4195         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4196                 return -EINVAL;
4197
4198         if (!capable(CAP_SYS_ADMIN))
4199                 return -EPERM;
4200
4201         link = bpf_link_by_id(id);
4202         if (IS_ERR(link))
4203                 return PTR_ERR(link);
4204
4205         fd = bpf_link_new_fd(link);
4206         if (fd < 0)
4207                 bpf_link_put(link);
4208
4209         return fd;
4210 }
4211
4212 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4213
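/* Closing a bpf-stats fd drops one count on the static key; run-time
 * stats collection stops once the last such fd is released.
 */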
4214 static int bpf_stats_release(struct inode *inode, struct file *file)
4215 {
4216         mutex_lock(&bpf_stats_enabled_mutex);
4217         static_key_slow_dec(&bpf_stats_enabled_key.key);
4218         mutex_unlock(&bpf_stats_enabled_mutex);
4219         return 0;
4220 }
4221
4222 static const struct file_operations bpf_stats_fops = {
4223         .release = bpf_stats_release,
4224 };
4225
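/* Enable run-time stats collection and return an fd that keeps it
 * enabled for as long as the fd stays open.
 */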
4226 static int bpf_enable_runtime_stats(void)
4227 {
4228         int fd;
4229
4230         mutex_lock(&bpf_stats_enabled_mutex);
4231
4232         /* Cap the count well below INT_MAX so the static key refcount cannot overflow */
4233         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4234                 mutex_unlock(&bpf_stats_enabled_mutex);
4235                 return -EBUSY;
4236         }
4237
4238         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4239         if (fd >= 0)
4240                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4241
4242         mutex_unlock(&bpf_stats_enabled_mutex);
4243         return fd;
4244 }
4245
4246 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4247
4248 static int bpf_enable_stats(union bpf_attr *attr)
4249 {
4251         if (CHECK_ATTR(BPF_ENABLE_STATS))
4252                 return -EINVAL;
4253
4254         if (!capable(CAP_SYS_ADMIN))
4255                 return -EPERM;
4256
4257         switch (attr->enable_stats.type) {
4258         case BPF_STATS_RUN_TIME:
4259                 return bpf_enable_runtime_stats();
4260         default:
4261                 break;
4262         }
4263         return -EINVAL;
4264 }
4265
4266 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4267
4268 static int bpf_iter_create(union bpf_attr *attr)
4269 {
4270         struct bpf_link *link;
4271         int err;
4272
4273         if (CHECK_ATTR(BPF_ITER_CREATE))
4274                 return -EINVAL;
4275
4276         if (attr->iter_create.flags)
4277                 return -EINVAL;
4278
4279         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4280         if (IS_ERR(link))
4281                 return PTR_ERR(link);
4282
4283         err = bpf_iter_new_fd(link);
4284         bpf_link_put(link);
4285
4286         return err;
4287 }
4288
4289 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4290
4291 static int bpf_prog_bind_map(union bpf_attr *attr)
4292 {
4293         struct bpf_prog *prog;
4294         struct bpf_map *map;
4295         struct bpf_map **used_maps_old, **used_maps_new;
4296         int i, ret = 0;
4297
4298         if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4299                 return -EINVAL;
4300
4301         if (attr->prog_bind_map.flags)
4302                 return -EINVAL;
4303
4304         prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4305         if (IS_ERR(prog))
4306                 return PTR_ERR(prog);
4307
4308         map = bpf_map_get(attr->prog_bind_map.map_fd);
4309         if (IS_ERR(map)) {
4310                 ret = PTR_ERR(map);
4311                 goto out_prog_put;
4312         }
4313
4314         mutex_lock(&prog->aux->used_maps_mutex);
4315
4316         used_maps_old = prog->aux->used_maps;
4317
4318         for (i = 0; i < prog->aux->used_map_cnt; i++)
4319                 if (used_maps_old[i] == map) {
4320                         bpf_map_put(map);
4321                         goto out_unlock;
4322                 }
4323
4324         used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4325                                       sizeof(used_maps_new[0]),
4326                                       GFP_KERNEL);
4327         if (!used_maps_new) {
4328                 ret = -ENOMEM;
4329                 goto out_unlock;
4330         }
4331
4332         memcpy(used_maps_new, used_maps_old,
4333                sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4334         used_maps_new[prog->aux->used_map_cnt] = map;
4335
4336         prog->aux->used_map_cnt++;
4337         prog->aux->used_maps = used_maps_new;
4338
4339         kfree(used_maps_old);
4340
4341 out_unlock:
4342         mutex_unlock(&prog->aux->used_maps_mutex);
4343
4344         if (ret)
4345                 bpf_map_put(map);
4346 out_prog_put:
4347         bpf_prog_put(prog);
4348         return ret;
4349 }
4350
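/* The bpf(2) syscall entry point: refuse unprivileged callers when
 * unprivileged BPF is disabled, validate and copy in the attribute
 * union, give LSMs a look, then dispatch on @cmd.
 */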
4351 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4352 {
4353         union bpf_attr attr;
4354         int err;
4355
4356         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4357                 return -EPERM;
4358
4359         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4360         if (err)
4361                 return err;
4362         size = min_t(u32, size, sizeof(attr));
4363
4364         /* copy attributes from user space; the copy may be shorter than sizeof(attr) */
4365         memset(&attr, 0, sizeof(attr));
4366         if (copy_from_user(&attr, uattr, size) != 0)
4367                 return -EFAULT;
4368
4369         err = security_bpf(cmd, &attr, size);
4370         if (err < 0)
4371                 return err;
4372
4373         switch (cmd) {
4374         case BPF_MAP_CREATE:
4375                 err = map_create(&attr);
4376                 break;
4377         case BPF_MAP_LOOKUP_ELEM:
4378                 err = map_lookup_elem(&attr);
4379                 break;
4380         case BPF_MAP_UPDATE_ELEM:
4381                 err = map_update_elem(&attr);
4382                 break;
4383         case BPF_MAP_DELETE_ELEM:
4384                 err = map_delete_elem(&attr);
4385                 break;
4386         case BPF_MAP_GET_NEXT_KEY:
4387                 err = map_get_next_key(&attr);
4388                 break;
4389         case BPF_MAP_FREEZE:
4390                 err = map_freeze(&attr);
4391                 break;
4392         case BPF_PROG_LOAD:
4393                 err = bpf_prog_load(&attr, uattr);
4394                 break;
4395         case BPF_OBJ_PIN:
4396                 err = bpf_obj_pin(&attr);
4397                 break;
4398         case BPF_OBJ_GET:
4399                 err = bpf_obj_get(&attr);
4400                 break;
4401         case BPF_PROG_ATTACH:
4402                 err = bpf_prog_attach(&attr);
4403                 break;
4404         case BPF_PROG_DETACH:
4405                 err = bpf_prog_detach(&attr);
4406                 break;
4407         case BPF_PROG_QUERY:
4408                 err = bpf_prog_query(&attr, uattr);
4409                 break;
4410         case BPF_PROG_TEST_RUN:
4411                 err = bpf_prog_test_run(&attr, uattr);
4412                 break;
4413         case BPF_PROG_GET_NEXT_ID:
4414                 err = bpf_obj_get_next_id(&attr, uattr,
4415                                           &prog_idr, &prog_idr_lock);
4416                 break;
4417         case BPF_MAP_GET_NEXT_ID:
4418                 err = bpf_obj_get_next_id(&attr, uattr,
4419                                           &map_idr, &map_idr_lock);
4420                 break;
4421         case BPF_BTF_GET_NEXT_ID:
4422                 err = bpf_obj_get_next_id(&attr, uattr,
4423                                           &btf_idr, &btf_idr_lock);
4424                 break;
4425         case BPF_PROG_GET_FD_BY_ID:
4426                 err = bpf_prog_get_fd_by_id(&attr);
4427                 break;
4428         case BPF_MAP_GET_FD_BY_ID:
4429                 err = bpf_map_get_fd_by_id(&attr);
4430                 break;
4431         case BPF_OBJ_GET_INFO_BY_FD:
4432                 err = bpf_obj_get_info_by_fd(&attr, uattr);
4433                 break;
4434         case BPF_RAW_TRACEPOINT_OPEN:
4435                 err = bpf_raw_tracepoint_open(&attr);
4436                 break;
4437         case BPF_BTF_LOAD:
4438                 err = bpf_btf_load(&attr);
4439                 break;
4440         case BPF_BTF_GET_FD_BY_ID:
4441                 err = bpf_btf_get_fd_by_id(&attr);
4442                 break;
4443         case BPF_TASK_FD_QUERY:
4444                 err = bpf_task_fd_query(&attr, uattr);
4445                 break;
4446         case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4447                 err = map_lookup_and_delete_elem(&attr);
4448                 break;
4449         case BPF_MAP_LOOKUP_BATCH:
4450                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4451                 break;
4452         case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4453                 err = bpf_map_do_batch(&attr, uattr,
4454                                        BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4455                 break;
4456         case BPF_MAP_UPDATE_BATCH:
4457                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4458                 break;
4459         case BPF_MAP_DELETE_BATCH:
4460                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4461                 break;
4462         case BPF_LINK_CREATE:
4463                 err = link_create(&attr);
4464                 break;
4465         case BPF_LINK_UPDATE:
4466                 err = link_update(&attr);
4467                 break;
4468         case BPF_LINK_GET_FD_BY_ID:
4469                 err = bpf_link_get_fd_by_id(&attr);
4470                 break;
4471         case BPF_LINK_GET_NEXT_ID:
4472                 err = bpf_obj_get_next_id(&attr, uattr,
4473                                           &link_idr, &link_idr_lock);
4474                 break;
4475         case BPF_ENABLE_STATS:
4476                 err = bpf_enable_stats(&attr);
4477                 break;
4478         case BPF_ITER_CREATE:
4479                 err = bpf_iter_create(&attr);
4480                 break;
4481         case BPF_LINK_DETACH:
4482                 err = link_detach(&attr);
4483                 break;
4484         case BPF_PROG_BIND_MAP:
4485                 err = bpf_prog_bind_map(&attr);
4486                 break;
4487         default:
4488                 err = -EINVAL;
4489                 break;
4490         }
4491
4492         return err;
4493 }