// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU window between this check and the copy_from_user() call
 * that follows it. That is not a concern, since this check only exists to
 * future-proof currently-unused bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	unsigned char __user *addr = uaddr + expected_size;
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	res = check_zeroed_user(addr, actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
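
/* Illustrative caller pattern (a sketch, mirroring how the syscall
 * handlers in this file consume a growable uapi struct): verify the
 * unknown tail first, then copy only the part this kernel knows about:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_user(&attr, uattr, size))
 *		return -EFAULT;
 */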

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static u32 bpf_map_value_size(struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}
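
/* Example (illustrative, assuming 8 possible CPUs): a
 * BPF_MAP_TYPE_PERCPU_ARRAY with value_size = 12 is presented to the
 * syscall interface as round_up(12, 8) * 8 = 128 bytes, i.e. one
 * 8-byte-aligned slot per possible CPU, so user buffers must be sized
 * for all possible CPUs, not just the online ones.
 */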

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock, since value wasn't zero inited */
			check_and_init_map_lock(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);
		return -EPERM;
	}
	return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct user_struct *user;
	int ret;

	if (size >= U32_MAX - PAGE_SIZE)
		return -E2BIG;

	user = get_current_user();
	ret = bpf_charge_memlock(user, pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	mem->pages = pages;
	mem->user = user;

	return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
	bpf_uncharge_memlock(mem->user, mem->pages);
	free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src)
{
	*dst = *src;

	/* Make sure src will not be used for the redundant uncharging. */
	memset(src, 0, sizeof(struct bpf_map_memory));
}

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
	int ret;

	ret = bpf_charge_memlock(map->memory.user, pages);
	if (ret)
		return ret;
	map->memory.pages += pages;
	return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
	bpf_uncharge_memlock(map->memory.user, pages);
	map->memory.pages -= pages;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
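
/* Return convention: idr_alloc_cyclic() hands back the new id (> 0) or
 * a negative errno (e.g. -ENOSPC once the [1, INT_MAX) space is
 * exhausted); it should never return 0 here, hence the WARN_ON_ONCE.
 * Callers of this wrapper therefore only see 0 or a negative errno.
 */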

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation's ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}
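
/* Note: freezing only masks FMODE_CAN_WRITE as seen by the syscall
 * side. A frozen map can still be read here, and BPF programs can still
 * write to it unless it was also created with BPF_F_RDONLY_PROG.
 */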

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 type = 0, jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		type  = array->aux->type;
		jited = array->aux->jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except the initial one) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt++;
		mutex_unlock(&map->freeze_mutex);
	}
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE) {
		mutex_lock(&map->freeze_mutex);
		map->writecnt--;
		mutex_unlock(&map->freeze_mutex);
	}
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping it
		 * as writable: a writable page reference could be leaked,
		 * which would let user-space modify the contents even after
		 * freezing, while the verifier assumes they do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		map->writecnt++;
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}
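
/* Flag-to-fd-mode summary: BPF_F_RDONLY -> O_RDONLY, BPF_F_WRONLY ->
 * O_WRONLY, neither -> O_RDWR, both -> -EINVAL. For example (an
 * illustrative scenario), a map created with map_flags = BPF_F_RDONLY
 * yields an fd without FMODE_CAN_WRITE, so a later map_update_elem()
 * through that fd fails with -EPERM.
 */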

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
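
/* Usage sketch (the pattern used throughout this file): each command
 * defines its last-used attr field, e.g.
 *
 *	#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 *	...
 *	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 *		return -EINVAL;
 *
 * which rejects the call if any byte past 'flags' in 'union bpf_attr'
 * is non-zero, keeping those bytes free for future extensions.
 */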

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}
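
/* Illustrative examples (assuming size == BPF_OBJ_NAME_LEN, i.e. 16):
 * src = "my_map.v2" copies nine valid chars and returns 9;
 * src = "bad-name" fails with -EINVAL on the '-'; a full 16 bytes with
 * no terminating '\0' also fails with -EINVAL.
 */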

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);

	map->spin_lock_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map to userspace,
		 * and userspace may have refcnt-ed it through
		 * BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	bpf_map_charge_move(&mem, &map->memory);
	map->ops->map_free(map);
	bpf_map_charge_finish(&mem);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return memdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
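
/* Iteration convention: a NULL 'key' (attr->key == 0) requests the
 * first key in the map, so userspace can walk every entry by feeding
 * each returned next_key back in until the call fails with -ENOENT.
 */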

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		maybe_wait_bpf_programs(map);
		if (err)
			break;
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kfree(key);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kfree(key);
		return -ENOMEM;
	}

	/* Take the fd reference only after all early-return paths, and
	 * drop it before returning, so it cannot be leaked.
	 */
	f = fdget(ufd);
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
		    map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	fdput(f);
	kfree(value);
	kfree(key);
	return err;
}
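
/* Batch semantics (for both the update and delete loops above): on a
 * mid-loop failure the error is returned, but uattr->batch.count still
 * reflects how many elements were processed, so userspace can resume
 * from the point of failure.
 */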

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	     (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kfree(buf_prevkey);
	kfree(buf);
	return err;
}
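
/* The -ENOENT retry above papers over a concurrent delete: if the key
 * returned by map_get_next_key() vanishes before bpf_map_copy_value()
 * runs, the same prev_key is retried up to MAP_LOOKUP_RETRIES times
 * before giving up with -EINTR, which userspace may treat as a hint to
 * call again.
 */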

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else {
		err = -ENOTSUPP;
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);

	if (map->writecnt) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
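
/* Freezing interlocks with bpf_map_mmap() above: a map with outstanding
 * writable mappings (writecnt > 0) cannot be frozen, and a frozen map
 * refuses new PROT_WRITE mappings, so once frozen the contents can no
 * longer be changed from user space at all.
 */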
1565
1566 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1567 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1568         [_id] = & _name ## _prog_ops,
1569 #define BPF_MAP_TYPE(_id, _ops)
1570 #define BPF_LINK_TYPE(_id, _name)
1571 #include <linux/bpf_types.h>
1572 #undef BPF_PROG_TYPE
1573 #undef BPF_MAP_TYPE
1574 #undef BPF_LINK_TYPE
1575 };
1576
1577 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1578 {
1579         const struct bpf_prog_ops *ops;
1580
1581         if (type >= ARRAY_SIZE(bpf_prog_types))
1582                 return -EINVAL;
1583         type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1584         ops = bpf_prog_types[type];
1585         if (!ops)
1586                 return -EINVAL;
1587
1588         if (!bpf_prog_is_dev_bound(prog->aux))
1589                 prog->aux->ops = ops;
1590         else
1591                 prog->aux->ops = &bpf_offload_prog_ops;
1592         prog->type = type;
1593         return 0;
1594 }
1595
1596 enum bpf_audit {
1597         BPF_AUDIT_LOAD,
1598         BPF_AUDIT_UNLOAD,
1599         BPF_AUDIT_MAX,
1600 };
1601
1602 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1603         [BPF_AUDIT_LOAD]   = "LOAD",
1604         [BPF_AUDIT_UNLOAD] = "UNLOAD",
1605 };
1606
1607 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1608 {
1609         struct audit_context *ctx = NULL;
1610         struct audit_buffer *ab;
1611
1612         if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1613                 return;
1614         if (audit_enabled == AUDIT_OFF)
1615                 return;
1616         if (op == BPF_AUDIT_LOAD)
1617                 ctx = audit_context();
1618         ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1619         if (unlikely(!ab))
1620                 return;
1621         audit_log_format(ab, "prog-id=%u op=%s",
1622                          prog->aux->id, bpf_audit_str[op]);
1623         audit_log_end(ab);
1624 }
1625
1626 int __bpf_prog_charge(struct user_struct *user, u32 pages)
1627 {
1628         unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1629         unsigned long user_bufs;
1630
1631         if (user) {
1632                 user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1633                 if (user_bufs > memlock_limit) {
1634                         atomic_long_sub(pages, &user->locked_vm);
1635                         return -EPERM;
1636                 }
1637         }
1638
1639         return 0;
1640 }
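
/* Worked example (illustrative numbers): with RLIMIT_MEMLOCK at 64 MiB and
 * 4 KiB pages, memlock_limit above is (64 << 20) >> 12 = 16384 pages; a
 * charge pushing user->locked_vm beyond that is immediately rolled back and
 * rejected with -EPERM.
 */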
1641
1642 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1643 {
1644         if (user)
1645                 atomic_long_sub(pages, &user->locked_vm);
1646 }
1647
1648 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1649 {
1650         struct user_struct *user = get_current_user();
1651         int ret;
1652
1653         ret = __bpf_prog_charge(user, prog->pages);
1654         if (ret) {
1655                 free_uid(user);
1656                 return ret;
1657         }
1658
1659         prog->aux->user = user;
1660         return 0;
1661 }
1662
1663 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1664 {
1665         struct user_struct *user = prog->aux->user;
1666
1667         __bpf_prog_uncharge(user, prog->pages);
1668         free_uid(user);
1669 }
1670
1671 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1672 {
1673         int id;
1674
1675         idr_preload(GFP_KERNEL);
1676         spin_lock_bh(&prog_idr_lock);
1677         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1678         if (id > 0)
1679                 prog->aux->id = id;
1680         spin_unlock_bh(&prog_idr_lock);
1681         idr_preload_end();
1682
1683         /* id is in [1, INT_MAX) */
1684         if (WARN_ON_ONCE(!id))
1685                 return -ENOSPC;
1686
1687         return id > 0 ? 0 : id;
1688 }
1689
1690 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1691 {
1692         /* cBPF to eBPF migrations are currently not in the idr store.
1693          * Offloaded programs are removed from the store when their device
1694          * disappears - even if someone grabs an fd to them they are unusable,
1695          * simply waiting for refcnt to drop to be freed.
1696          */
1697         if (!prog->aux->id)
1698                 return;
1699
1700         if (do_idr_lock)
1701                 spin_lock_bh(&prog_idr_lock);
1702         else
1703                 __acquire(&prog_idr_lock);
1704
1705         idr_remove(&prog_idr, prog->aux->id);
1706         prog->aux->id = 0;
1707
1708         if (do_idr_lock)
1709                 spin_unlock_bh(&prog_idr_lock);
1710         else
1711                 __release(&prog_idr_lock);
1712 }
1713
1714 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1715 {
1716         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1717
1718         kvfree(aux->func_info);
1719         kfree(aux->func_info_aux);
1720         bpf_prog_uncharge_memlock(aux->prog);
1721         security_bpf_prog_free(aux);
1722         bpf_prog_free(aux->prog);
1723 }
1724
1725 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1726 {
1727         bpf_prog_kallsyms_del_all(prog);
1728         btf_put(prog->aux->btf);
1729         bpf_prog_free_linfo(prog);
1730
1731         if (deferred)
1732                 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1733         else
1734                 __bpf_prog_put_rcu(&prog->aux->rcu);
1735 }
1736
1737 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1738 {
1739         if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1740                 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1741                 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1742                 /* bpf_prog_free_id() must be called first */
1743                 bpf_prog_free_id(prog, do_idr_lock);
1744                 __bpf_prog_put_noref(prog, true);
1745         }
1746 }
1747
1748 void bpf_prog_put(struct bpf_prog *prog)
1749 {
1750         __bpf_prog_put(prog, true);
1751 }
1752 EXPORT_SYMBOL_GPL(bpf_prog_put);
1753
1754 static int bpf_prog_release(struct inode *inode, struct file *filp)
1755 {
1756         struct bpf_prog *prog = filp->private_data;
1757
1758         bpf_prog_put(prog);
1759         return 0;
1760 }
1761
1762 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1763                                struct bpf_prog_stats *stats)
1764 {
1765         u64 nsecs = 0, cnt = 0;
1766         int cpu;
1767
1768         for_each_possible_cpu(cpu) {
1769                 const struct bpf_prog_stats *st;
1770                 unsigned int start;
1771                 u64 tnsecs, tcnt;
1772
1773                 st = per_cpu_ptr(prog->aux->stats, cpu);
1774                 do {
1775                         start = u64_stats_fetch_begin_irq(&st->syncp);
1776                         tnsecs = st->nsecs;
1777                         tcnt = st->cnt;
1778                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1779                 nsecs += tnsecs;
1780                 cnt += tcnt;
1781         }
1782         stats->nsecs = nsecs;
1783         stats->cnt = cnt;
1784 }
1785
1786 #ifdef CONFIG_PROC_FS
1787 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1788 {
1789         const struct bpf_prog *prog = filp->private_data;
1790         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1791         struct bpf_prog_stats stats;
1792
1793         bpf_prog_get_stats(prog, &stats);
1794         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1795         seq_printf(m,
1796                    "prog_type:\t%u\n"
1797                    "prog_jited:\t%u\n"
1798                    "prog_tag:\t%s\n"
1799                    "memlock:\t%llu\n"
1800                    "prog_id:\t%u\n"
1801                    "run_time_ns:\t%llu\n"
1802                    "run_cnt:\t%llu\n",
1803                    prog->type,
1804                    prog->jited,
1805                    prog_tag,
1806                    prog->pages * 1ULL << PAGE_SHIFT,
1807                    prog->aux->id,
1808                    stats.nsecs,
1809                    stats.cnt);
1810 }
1811 #endif
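
/* Illustrative /proc/<pid>/fdinfo/<fd> output produced by the seq_printf()
 * above (all values made up):
 *
 *	prog_type:	1
 *	prog_jited:	1
 *	prog_tag:	9a5f94c910086b84
 *	memlock:	4096
 *	prog_id:	17
 *	run_time_ns:	0
 *	run_cnt:	0
 */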
1812
1813 const struct file_operations bpf_prog_fops = {
1814 #ifdef CONFIG_PROC_FS
1815         .show_fdinfo    = bpf_prog_show_fdinfo,
1816 #endif
1817         .release        = bpf_prog_release,
1818         .read           = bpf_dummy_read,
1819         .write          = bpf_dummy_write,
1820 };
1821
1822 int bpf_prog_new_fd(struct bpf_prog *prog)
1823 {
1824         int ret;
1825
1826         ret = security_bpf_prog(prog);
1827         if (ret < 0)
1828                 return ret;
1829
1830         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1831                                 O_RDWR | O_CLOEXEC);
1832 }
1833
1834 static struct bpf_prog *____bpf_prog_get(struct fd f)
1835 {
1836         if (!f.file)
1837                 return ERR_PTR(-EBADF);
1838         if (f.file->f_op != &bpf_prog_fops) {
1839                 fdput(f);
1840                 return ERR_PTR(-EINVAL);
1841         }
1842
1843         return f.file->private_data;
1844 }
1845
1846 void bpf_prog_add(struct bpf_prog *prog, int i)
1847 {
1848         atomic64_add(i, &prog->aux->refcnt);
1849 }
1850 EXPORT_SYMBOL_GPL(bpf_prog_add);
1851
1852 void bpf_prog_sub(struct bpf_prog *prog, int i)
1853 {
1854         /* Only to be used for undoing previous bpf_prog_add() in some
1855          * error path. We still know that another entity in our call
1856          * path holds a reference to the program, thus atomic64_sub() can
1857          * be safely used in such cases!
1858          */
1859         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1860 }
1861 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1862
1863 void bpf_prog_inc(struct bpf_prog *prog)
1864 {
1865         atomic64_inc(&prog->aux->refcnt);
1866 }
1867 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1868
1869 /* prog_idr_lock should have been held */
1870 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1871 {
1872         int refold;
1873
1874         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1875
1876         if (!refold)
1877                 return ERR_PTR(-ENOENT);
1878
1879         return prog;
1880 }
1881 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1882
1883 bool bpf_prog_get_ok(struct bpf_prog *prog,
1884                             enum bpf_prog_type *attach_type, bool attach_drv)
1885 {
1886         /* not an attachment, just a refcount inc, always allow */
1887         if (!attach_type)
1888                 return true;
1889
1890         if (prog->type != *attach_type)
1891                 return false;
1892         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1893                 return false;
1894
1895         return true;
1896 }
1897
1898 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1899                                        bool attach_drv)
1900 {
1901         struct fd f = fdget(ufd);
1902         struct bpf_prog *prog;
1903
1904         prog = ____bpf_prog_get(f);
1905         if (IS_ERR(prog))
1906                 return prog;
1907         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1908                 prog = ERR_PTR(-EINVAL);
1909                 goto out;
1910         }
1911
1912         bpf_prog_inc(prog);
1913 out:
1914         fdput(f);
1915         return prog;
1916 }
1917
1918 struct bpf_prog *bpf_prog_get(u32 ufd)
1919 {
1920         return __bpf_prog_get(ufd, NULL, false);
1921 }
1922
1923 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1924                                        bool attach_drv)
1925 {
1926         return __bpf_prog_get(ufd, &type, attach_drv);
1927 }
1928 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1929
1930 /* Initially all BPF programs could be loaded w/o specifying
1931  * expected_attach_type. Later for some of them specifying expected_attach_type
1932  * at load time became required so that the program could be validated properly.
1933  * Programs of types that are allowed to be loaded both w/ and w/o (for
1934  * backward compatibility) expected_attach_type should have the default attach
1935  * type assigned to expected_attach_type for the latter case, so that it can be
1936  * validated later at attach time.
1937  *
1938  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1939  * prog type requires it but has some attach types that have to be backward
1940  * compatible.
1941  */
1942 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1943 {
1944         switch (attr->prog_type) {
1945         case BPF_PROG_TYPE_CGROUP_SOCK:
1946                 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1947                  * exist so checking for non-zero is the way to go here.
1948                  */
1949                 if (!attr->expected_attach_type)
1950                         attr->expected_attach_type =
1951                                 BPF_CGROUP_INET_SOCK_CREATE;
1952                 break;
1953         }
1954 }
1955
1956 static int
1957 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1958                            enum bpf_attach_type expected_attach_type,
1959                            u32 btf_id, u32 prog_fd)
1960 {
1961         if (btf_id) {
1962                 if (btf_id > BTF_MAX_TYPE)
1963                         return -EINVAL;
1964
1965                 switch (prog_type) {
1966                 case BPF_PROG_TYPE_TRACING:
1967                 case BPF_PROG_TYPE_LSM:
1968                 case BPF_PROG_TYPE_STRUCT_OPS:
1969                 case BPF_PROG_TYPE_EXT:
1970                         break;
1971                 default:
1972                         return -EINVAL;
1973                 }
1974         }
1975
1976         if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
1977             prog_type != BPF_PROG_TYPE_EXT)
1978                 return -EINVAL;
1979
1980         switch (prog_type) {
1981         case BPF_PROG_TYPE_CGROUP_SOCK:
1982                 switch (expected_attach_type) {
1983                 case BPF_CGROUP_INET_SOCK_CREATE:
1984                 case BPF_CGROUP_INET_SOCK_RELEASE:
1985                 case BPF_CGROUP_INET4_POST_BIND:
1986                 case BPF_CGROUP_INET6_POST_BIND:
1987                         return 0;
1988                 default:
1989                         return -EINVAL;
1990                 }
1991         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1992                 switch (expected_attach_type) {
1993                 case BPF_CGROUP_INET4_BIND:
1994                 case BPF_CGROUP_INET6_BIND:
1995                 case BPF_CGROUP_INET4_CONNECT:
1996                 case BPF_CGROUP_INET6_CONNECT:
1997                 case BPF_CGROUP_INET4_GETPEERNAME:
1998                 case BPF_CGROUP_INET6_GETPEERNAME:
1999                 case BPF_CGROUP_INET4_GETSOCKNAME:
2000                 case BPF_CGROUP_INET6_GETSOCKNAME:
2001                 case BPF_CGROUP_UDP4_SENDMSG:
2002                 case BPF_CGROUP_UDP6_SENDMSG:
2003                 case BPF_CGROUP_UDP4_RECVMSG:
2004                 case BPF_CGROUP_UDP6_RECVMSG:
2005                         return 0;
2006                 default:
2007                         return -EINVAL;
2008                 }
2009         case BPF_PROG_TYPE_CGROUP_SKB:
2010                 switch (expected_attach_type) {
2011                 case BPF_CGROUP_INET_INGRESS:
2012                 case BPF_CGROUP_INET_EGRESS:
2013                         return 0;
2014                 default:
2015                         return -EINVAL;
2016                 }
2017         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2018                 switch (expected_attach_type) {
2019                 case BPF_CGROUP_SETSOCKOPT:
2020                 case BPF_CGROUP_GETSOCKOPT:
2021                         return 0;
2022                 default:
2023                         return -EINVAL;
2024                 }
2025         case BPF_PROG_TYPE_SK_LOOKUP:
2026                 if (expected_attach_type == BPF_SK_LOOKUP)
2027                         return 0;
2028                 return -EINVAL;
2029         case BPF_PROG_TYPE_EXT:
2030                 if (expected_attach_type)
2031                         return -EINVAL;
2032                 /* fallthrough */
2033         default:
2034                 return 0;
2035         }
2036 }
2037
2038 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2039 {
2040         switch (prog_type) {
2041         case BPF_PROG_TYPE_SCHED_CLS:
2042         case BPF_PROG_TYPE_SCHED_ACT:
2043         case BPF_PROG_TYPE_XDP:
2044         case BPF_PROG_TYPE_LWT_IN:
2045         case BPF_PROG_TYPE_LWT_OUT:
2046         case BPF_PROG_TYPE_LWT_XMIT:
2047         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2048         case BPF_PROG_TYPE_SK_SKB:
2049         case BPF_PROG_TYPE_SK_MSG:
2050         case BPF_PROG_TYPE_LIRC_MODE2:
2051         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2052         case BPF_PROG_TYPE_CGROUP_DEVICE:
2053         case BPF_PROG_TYPE_CGROUP_SOCK:
2054         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2055         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2056         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2057         case BPF_PROG_TYPE_SOCK_OPS:
2058         case BPF_PROG_TYPE_EXT: /* extends any prog */
2059                 return true;
2060         case BPF_PROG_TYPE_CGROUP_SKB:
2061                 /* always unpriv */
2062         case BPF_PROG_TYPE_SK_REUSEPORT:
2063                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2064         default:
2065                 return false;
2066         }
2067 }
2068
2069 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2070 {
2071         switch (prog_type) {
2072         case BPF_PROG_TYPE_KPROBE:
2073         case BPF_PROG_TYPE_TRACEPOINT:
2074         case BPF_PROG_TYPE_PERF_EVENT:
2075         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2076         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2077         case BPF_PROG_TYPE_TRACING:
2078         case BPF_PROG_TYPE_LSM:
2079         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2080         case BPF_PROG_TYPE_EXT: /* extends any prog */
2081                 return true;
2082         default:
2083                 return false;
2084         }
2085 }
2086
2087 /* last field in 'union bpf_attr' used by this command */
2088 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2089
2090 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2091 {
2092         enum bpf_prog_type type = attr->prog_type;
2093         struct bpf_prog *prog;
2094         int err;
2095         char license[128];
2096         bool is_gpl;
2097
2098         if (CHECK_ATTR(BPF_PROG_LOAD))
2099                 return -EINVAL;
2100
2101         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2102                                  BPF_F_ANY_ALIGNMENT |
2103                                  BPF_F_TEST_STATE_FREQ |
2104                                  BPF_F_TEST_RND_HI32))
2105                 return -EINVAL;
2106
2107         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2108             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2109             !bpf_capable())
2110                 return -EPERM;
2111
2112         /* copy eBPF program license from user space */
2113         if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2114                               sizeof(license) - 1) < 0)
2115                 return -EFAULT;
2116         license[sizeof(license) - 1] = 0;
2117
2118         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2119         is_gpl = license_is_gpl_compatible(license);
2120
2121         if (attr->insn_cnt == 0 ||
2122             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2123                 return -E2BIG;
2124         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2125             type != BPF_PROG_TYPE_CGROUP_SKB &&
2126             !bpf_capable())
2127                 return -EPERM;
2128
2129         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2130                 return -EPERM;
2131         if (is_perfmon_prog_type(type) && !perfmon_capable())
2132                 return -EPERM;
2133
2134         bpf_prog_load_fixup_attach_type(attr);
2135         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2136                                        attr->attach_btf_id,
2137                                        attr->attach_prog_fd))
2138                 return -EINVAL;
2139
2140         /* plain bpf_prog allocation */
2141         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2142         if (!prog)
2143                 return -ENOMEM;
2144
2145         prog->expected_attach_type = attr->expected_attach_type;
2146         prog->aux->attach_btf_id = attr->attach_btf_id;
2147         if (attr->attach_prog_fd) {
2148                 struct bpf_prog *tgt_prog;
2149
2150                 tgt_prog = bpf_prog_get(attr->attach_prog_fd);
2151                 if (IS_ERR(tgt_prog)) {
2152                         err = PTR_ERR(tgt_prog);
2153                         goto free_prog_nouncharge;
2154                 }
2155                 prog->aux->linked_prog = tgt_prog;
2156         }
2157
2158         prog->aux->offload_requested = !!attr->prog_ifindex;
2159
2160         err = security_bpf_prog_alloc(prog->aux);
2161         if (err)
2162                 goto free_prog_nouncharge;
2163
2164         err = bpf_prog_charge_memlock(prog);
2165         if (err)
2166                 goto free_prog_sec;
2167
2168         prog->len = attr->insn_cnt;
2169
2170         err = -EFAULT;
2171         if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2172                            bpf_prog_insn_size(prog)) != 0)
2173                 goto free_prog;
2174
2175         prog->orig_prog = NULL;
2176         prog->jited = 0;
2177
2178         atomic64_set(&prog->aux->refcnt, 1);
2179         prog->gpl_compatible = is_gpl ? 1 : 0;
2180
2181         if (bpf_prog_is_dev_bound(prog->aux)) {
2182                 err = bpf_prog_offload_init(prog, attr);
2183                 if (err)
2184                         goto free_prog;
2185         }
2186
2187         /* find program type: socket_filter vs tracing_filter */
2188         err = find_prog_type(type, prog);
2189         if (err < 0)
2190                 goto free_prog;
2191
2192         prog->aux->load_time = ktime_get_boottime_ns();
2193         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2194                                sizeof(attr->prog_name));
2195         if (err < 0)
2196                 goto free_prog;
2197
2198         /* run eBPF verifier */
2199         err = bpf_check(&prog, attr, uattr);
2200         if (err < 0)
2201                 goto free_used_maps;
2202
2203         prog = bpf_prog_select_runtime(prog, &err);
2204         if (err < 0)
2205                 goto free_used_maps;
2206
2207         err = bpf_prog_alloc_id(prog);
2208         if (err)
2209                 goto free_used_maps;
2210
2211         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2212          * effectively publicly exposed. However, retrieving via
2213          * bpf_prog_get_fd_by_id() will take another reference,
2214          * therefore it cannot go away underneath us.
2215          *
2216          * Only for the time /after/ successful bpf_prog_new_fd()
2217          * and before returning to userspace, we might just hold
2218          * one reference and any parallel close on that fd could
2219          * rip everything out. Hence, below notifications must
2220          * happen before bpf_prog_new_fd().
2221          *
2222          * Also, any failure handling from this point onwards must
2223          * be using bpf_prog_put() given the program is exposed.
2224          */
2225         bpf_prog_kallsyms_add(prog);
2226         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2227         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2228
2229         err = bpf_prog_new_fd(prog);
2230         if (err < 0)
2231                 bpf_prog_put(prog);
2232         return err;
2233
2234 free_used_maps:
2235         /* In case we have subprogs, we need to wait for a grace
2236          * period before we can tear down JIT memory since symbols
2237          * are already exposed under kallsyms.
2238          */
2239         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2240         return err;
2241 free_prog:
2242         bpf_prog_uncharge_memlock(prog);
2243 free_prog_sec:
2244         security_bpf_prog_free(prog->aux);
2245 free_prog_nouncharge:
2246         bpf_prog_free(prog);
2247         return err;
2248 }
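
/* Illustrative user-space sketch (editorial addition, not kernel code): the
 * minimal BPF_PROG_LOAD request exercising the path above only needs insns,
 * insn_cnt, license and prog_type; this two-instruction program returns 0:
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns = (__u64)(unsigned long)insns;
 *	attr.insn_cnt = 2;
 *	attr.license = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */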
2249
2250 #define BPF_OBJ_LAST_FIELD file_flags
2251
2252 static int bpf_obj_pin(const union bpf_attr *attr)
2253 {
2254         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2255                 return -EINVAL;
2256
2257         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2258 }
2259
2260 static int bpf_obj_get(const union bpf_attr *attr)
2261 {
2262         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2263             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2264                 return -EINVAL;
2265
2266         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2267                                 attr->file_flags);
2268 }
2269
2270 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2271                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2272 {
2273         atomic64_set(&link->refcnt, 1);
2274         link->type = type;
2275         link->id = 0;
2276         link->ops = ops;
2277         link->prog = prog;
2278 }
2279
2280 static void bpf_link_free_id(int id)
2281 {
2282         if (!id)
2283                 return;
2284
2285         spin_lock_bh(&link_idr_lock);
2286         idr_remove(&link_idr, id);
2287         spin_unlock_bh(&link_idr_lock);
2288 }
2289
2290 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2291  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2292  * anon_inode's release() call. This helper marks bpf_link as
2293  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2294  * refcnt is not decremented; it is the responsibility of the calling code that
2295  * failed to complete bpf_link initialization.
2296  */
2297 void bpf_link_cleanup(struct bpf_link_primer *primer)
2298 {
2299         primer->link->prog = NULL;
2300         bpf_link_free_id(primer->id);
2301         fput(primer->file);
2302         put_unused_fd(primer->fd);
2303 }
2304
2305 void bpf_link_inc(struct bpf_link *link)
2306 {
2307         atomic64_inc(&link->refcnt);
2308 }
2309
2310 /* bpf_link_free is guaranteed to be called from process context */
2311 static void bpf_link_free(struct bpf_link *link)
2312 {
2313         bpf_link_free_id(link->id);
2314         if (link->prog) {
2315                 /* detach BPF program, clean up used resources */
2316                 link->ops->release(link);
2317                 bpf_prog_put(link->prog);
2318         }
2319         /* free bpf_link and its containing memory */
2320         link->ops->dealloc(link);
2321 }
2322
2323 static void bpf_link_put_deferred(struct work_struct *work)
2324 {
2325         struct bpf_link *link = container_of(work, struct bpf_link, work);
2326
2327         bpf_link_free(link);
2328 }
2329
2330 /* bpf_link_put can be called from atomic context, but ensures that resources
2331  * are freed from process context
2332  */
2333 void bpf_link_put(struct bpf_link *link)
2334 {
2335         if (!atomic64_dec_and_test(&link->refcnt))
2336                 return;
2337
2338         if (in_atomic()) {
2339                 INIT_WORK(&link->work, bpf_link_put_deferred);
2340                 schedule_work(&link->work);
2341         } else {
2342                 bpf_link_free(link);
2343         }
2344 }
2345
2346 static int bpf_link_release(struct inode *inode, struct file *filp)
2347 {
2348         struct bpf_link *link = filp->private_data;
2349
2350         bpf_link_put(link);
2351         return 0;
2352 }
2353
2354 #ifdef CONFIG_PROC_FS
2355 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2356 #define BPF_MAP_TYPE(_id, _ops)
2357 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2358 static const char *bpf_link_type_strs[] = {
2359         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2360 #include <linux/bpf_types.h>
2361 };
2362 #undef BPF_PROG_TYPE
2363 #undef BPF_MAP_TYPE
2364 #undef BPF_LINK_TYPE
2365
2366 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2367 {
2368         const struct bpf_link *link = filp->private_data;
2369         const struct bpf_prog *prog = link->prog;
2370         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2371
2372         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2373         seq_printf(m,
2374                    "link_type:\t%s\n"
2375                    "link_id:\t%u\n"
2376                    "prog_tag:\t%s\n"
2377                    "prog_id:\t%u\n",
2378                    bpf_link_type_strs[link->type],
2379                    link->id,
2380                    prog_tag,
2381                    prog->aux->id);
2382         if (link->ops->show_fdinfo)
2383                 link->ops->show_fdinfo(link, m);
2384 }
2385 #endif
2386
2387 static const struct file_operations bpf_link_fops = {
2388 #ifdef CONFIG_PROC_FS
2389         .show_fdinfo    = bpf_link_show_fdinfo,
2390 #endif
2391         .release        = bpf_link_release,
2392         .read           = bpf_dummy_read,
2393         .write          = bpf_dummy_write,
2394 };
2395
2396 static int bpf_link_alloc_id(struct bpf_link *link)
2397 {
2398         int id;
2399
2400         idr_preload(GFP_KERNEL);
2401         spin_lock_bh(&link_idr_lock);
2402         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2403         spin_unlock_bh(&link_idr_lock);
2404         idr_preload_end();
2405
2406         return id;
2407 }
2408
2409 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2410  * reserving unused FD and allocating ID from link_idr. This is to be paired
2411  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2412  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2413  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
2414  * the transient state is passed around in struct bpf_link_primer.
2415  * This is the preferred way to create and initialize bpf_link, especially when
2416  * there are complicated and expensive operations in between creating bpf_link
2417  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2418  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2419  * expensive (and potentially failing) roll-back operations in the rare case
2420  * that the file, FD, or ID can't be allocated.
2421  */
2422 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2423 {
2424         struct file *file;
2425         int fd, id;
2426
2427         fd = get_unused_fd_flags(O_CLOEXEC);
2428         if (fd < 0)
2429                 return fd;
2430
2432         id = bpf_link_alloc_id(link);
2433         if (id < 0) {
2434                 put_unused_fd(fd);
2435                 return id;
2436         }
2437
2438         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2439         if (IS_ERR(file)) {
2440                 bpf_link_free_id(id);
2441                 put_unused_fd(fd);
2442                 return PTR_ERR(file);
2443         }
2444
2445         primer->link = link;
2446         primer->file = file;
2447         primer->fd = fd;
2448         primer->id = id;
2449         return 0;
2450 }
2451
2452 int bpf_link_settle(struct bpf_link_primer *primer)
2453 {
2454         /* make bpf_link fetchable by ID */
2455         spin_lock_bh(&link_idr_lock);
2456         primer->link->id = primer->id;
2457         spin_unlock_bh(&link_idr_lock);
2458         /* make bpf_link fetchable by FD */
2459         fd_install(primer->fd, primer->file);
2460         /* pass through installed FD */
2461         return primer->fd;
2462 }
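
/* The intended calling pattern for the primer API above, mirrored by the
 * attach paths later in this file (sketch; <attach to hook> stands for the
 * hook-specific step such as bpf_trampoline_link_prog()):
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		goto out;
 *	}
 *	err = <attach to hook>;
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		goto out;
 *	}
 *	return bpf_link_settle(&link_primer);
 */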
2463
2464 int bpf_link_new_fd(struct bpf_link *link)
2465 {
2466         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2467 }
2468
2469 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2470 {
2471         struct fd f = fdget(ufd);
2472         struct bpf_link *link;
2473
2474         if (!f.file)
2475                 return ERR_PTR(-EBADF);
2476         if (f.file->f_op != &bpf_link_fops) {
2477                 fdput(f);
2478                 return ERR_PTR(-EINVAL);
2479         }
2480
2481         link = f.file->private_data;
2482         bpf_link_inc(link);
2483         fdput(f);
2484
2485         return link;
2486 }
2487
2488 struct bpf_tracing_link {
2489         struct bpf_link link;
2490         enum bpf_attach_type attach_type;
2491 };
2492
2493 static void bpf_tracing_link_release(struct bpf_link *link)
2494 {
2495         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
2496 }
2497
2498 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2499 {
2500         struct bpf_tracing_link *tr_link =
2501                 container_of(link, struct bpf_tracing_link, link);
2502
2503         kfree(tr_link);
2504 }
2505
2506 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2507                                          struct seq_file *seq)
2508 {
2509         struct bpf_tracing_link *tr_link =
2510                 container_of(link, struct bpf_tracing_link, link);
2511
2512         seq_printf(seq,
2513                    "attach_type:\t%d\n",
2514                    tr_link->attach_type);
2515 }
2516
2517 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2518                                            struct bpf_link_info *info)
2519 {
2520         struct bpf_tracing_link *tr_link =
2521                 container_of(link, struct bpf_tracing_link, link);
2522
2523         info->tracing.attach_type = tr_link->attach_type;
2524
2525         return 0;
2526 }
2527
2528 static const struct bpf_link_ops bpf_tracing_link_lops = {
2529         .release = bpf_tracing_link_release,
2530         .dealloc = bpf_tracing_link_dealloc,
2531         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2532         .fill_link_info = bpf_tracing_link_fill_link_info,
2533 };
2534
2535 static int bpf_tracing_prog_attach(struct bpf_prog *prog)
2536 {
2537         struct bpf_link_primer link_primer;
2538         struct bpf_tracing_link *link;
2539         int err;
2540
2541         switch (prog->type) {
2542         case BPF_PROG_TYPE_TRACING:
2543                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2544                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2545                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2546                         err = -EINVAL;
2547                         goto out_put_prog;
2548                 }
2549                 break;
2550         case BPF_PROG_TYPE_EXT:
2551                 if (prog->expected_attach_type != 0) {
2552                         err = -EINVAL;
2553                         goto out_put_prog;
2554                 }
2555                 break;
2556         case BPF_PROG_TYPE_LSM:
2557                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2558                         err = -EINVAL;
2559                         goto out_put_prog;
2560                 }
2561                 break;
2562         default:
2563                 err = -EINVAL;
2564                 goto out_put_prog;
2565         }
2566
2567         link = kzalloc(sizeof(*link), GFP_USER);
2568         if (!link) {
2569                 err = -ENOMEM;
2570                 goto out_put_prog;
2571         }
2572         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2573                       &bpf_tracing_link_lops, prog);
2574         link->attach_type = prog->expected_attach_type;
2575
2576         err = bpf_link_prime(&link->link, &link_primer);
2577         if (err) {
2578                 kfree(link);
2579                 goto out_put_prog;
2580         }
2581
2582         err = bpf_trampoline_link_prog(prog);
2583         if (err) {
2584                 bpf_link_cleanup(&link_primer);
2585                 goto out_put_prog;
2586         }
2587
2588         return bpf_link_settle(&link_primer);
2589 out_put_prog:
2590         bpf_prog_put(prog);
2591         return err;
2592 }
2593
2594 struct bpf_raw_tp_link {
2595         struct bpf_link link;
2596         struct bpf_raw_event_map *btp;
2597 };
2598
2599 static void bpf_raw_tp_link_release(struct bpf_link *link)
2600 {
2601         struct bpf_raw_tp_link *raw_tp =
2602                 container_of(link, struct bpf_raw_tp_link, link);
2603
2604         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2605         bpf_put_raw_tracepoint(raw_tp->btp);
2606 }
2607
2608 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2609 {
2610         struct bpf_raw_tp_link *raw_tp =
2611                 container_of(link, struct bpf_raw_tp_link, link);
2612
2613         kfree(raw_tp);
2614 }
2615
2616 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2617                                         struct seq_file *seq)
2618 {
2619         struct bpf_raw_tp_link *raw_tp_link =
2620                 container_of(link, struct bpf_raw_tp_link, link);
2621
2622         seq_printf(seq,
2623                    "tp_name:\t%s\n",
2624                    raw_tp_link->btp->tp->name);
2625 }
2626
2627 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2628                                           struct bpf_link_info *info)
2629 {
2630         struct bpf_raw_tp_link *raw_tp_link =
2631                 container_of(link, struct bpf_raw_tp_link, link);
2632         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2633         const char *tp_name = raw_tp_link->btp->tp->name;
2634         u32 ulen = info->raw_tracepoint.tp_name_len;
2635         size_t tp_len = strlen(tp_name);
2636
2637         if (ulen && !ubuf)
2638                 return -EINVAL;
2639
2640         info->raw_tracepoint.tp_name_len = tp_len + 1;
2641
2642         if (!ubuf)
2643                 return 0;
2644
2645         if (ulen >= tp_len + 1) {
2646                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2647                         return -EFAULT;
2648         } else {
2649                 char zero = '\0';
2650
2651                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2652                         return -EFAULT;
2653                 if (put_user(zero, ubuf + ulen - 1))
2654                         return -EFAULT;
2655                 return -ENOSPC;
2656         }
2657
2658         return 0;
2659 }
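
/* Worked example (illustrative): for tp_name "sched_switch" (tp_len = 12) and
 * a caller buffer of ulen = 6, the else branch above copies "sched" plus a
 * terminating NUL, sets tp_name_len to 13 so the caller knows the required
 * size, and returns -ENOSPC to signal truncation.
 */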
2660
2661 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2662         .release = bpf_raw_tp_link_release,
2663         .dealloc = bpf_raw_tp_link_dealloc,
2664         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2665         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2666 };
2667
2668 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2669
2670 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2671 {
2672         struct bpf_link_primer link_primer;
2673         struct bpf_raw_tp_link *link;
2674         struct bpf_raw_event_map *btp;
2675         struct bpf_prog *prog;
2676         const char *tp_name;
2677         char buf[128];
2678         int err;
2679
2680         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2681                 return -EINVAL;
2682
2683         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2684         if (IS_ERR(prog))
2685                 return PTR_ERR(prog);
2686
2687         switch (prog->type) {
2688         case BPF_PROG_TYPE_TRACING:
2689         case BPF_PROG_TYPE_EXT:
2690         case BPF_PROG_TYPE_LSM:
2691                 if (attr->raw_tracepoint.name) {
2692                         /* The attach point for this category of programs
2693                          * should be specified via btf_id during program load.
2694                          */
2695                         err = -EINVAL;
2696                         goto out_put_prog;
2697                 }
2698                 if (prog->type == BPF_PROG_TYPE_TRACING &&
2699                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2700                         tp_name = prog->aux->attach_func_name;
2701                         break;
2702                 }
2703                 return bpf_tracing_prog_attach(prog);
2704         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2705         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2706                 if (strncpy_from_user(buf,
2707                                       u64_to_user_ptr(attr->raw_tracepoint.name),
2708                                       sizeof(buf) - 1) < 0) {
2709                         err = -EFAULT;
2710                         goto out_put_prog;
2711                 }
2712                 buf[sizeof(buf) - 1] = 0;
2713                 tp_name = buf;
2714                 break;
2715         default:
2716                 err = -EINVAL;
2717                 goto out_put_prog;
2718         }
2719
2720         btp = bpf_get_raw_tracepoint(tp_name);
2721         if (!btp) {
2722                 err = -ENOENT;
2723                 goto out_put_prog;
2724         }
2725
2726         link = kzalloc(sizeof(*link), GFP_USER);
2727         if (!link) {
2728                 err = -ENOMEM;
2729                 goto out_put_btp;
2730         }
2731         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2732                       &bpf_raw_tp_link_lops, prog);
2733         link->btp = btp;
2734
2735         err = bpf_link_prime(&link->link, &link_primer);
2736         if (err) {
2737                 kfree(link);
2738                 goto out_put_btp;
2739         }
2740
2741         err = bpf_probe_register(link->btp, prog);
2742         if (err) {
2743                 bpf_link_cleanup(&link_primer);
2744                 goto out_put_btp;
2745         }
2746
2747         return bpf_link_settle(&link_primer);
2748
2749 out_put_btp:
2750         bpf_put_raw_tracepoint(btp);
2751 out_put_prog:
2752         bpf_prog_put(prog);
2753         return err;
2754 }
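
/* Illustrative user-space sketch (editorial addition, not kernel code):
 * opening a raw tracepoint for an already-loaded
 * BPF_PROG_TYPE_RAW_TRACEPOINT program prog_fd:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */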
2755
2756 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2757                                              enum bpf_attach_type attach_type)
2758 {
2759         switch (prog->type) {
2760         case BPF_PROG_TYPE_CGROUP_SOCK:
2761         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2762         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2763         case BPF_PROG_TYPE_SK_LOOKUP:
2764                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2765         case BPF_PROG_TYPE_CGROUP_SKB:
2766                 if (!capable(CAP_NET_ADMIN))
2767                         /* cg-skb progs can be loaded by unpriv user.
2768                          * check permissions at attach time.
2769                          */
2770                         return -EPERM;
2771                 return prog->enforce_expected_attach_type &&
2772                         prog->expected_attach_type != attach_type ?
2773                         -EINVAL : 0;
2774         default:
2775                 return 0;
2776         }
2777 }
2778
2779 static enum bpf_prog_type
2780 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2781 {
2782         switch (attach_type) {
2783         case BPF_CGROUP_INET_INGRESS:
2784         case BPF_CGROUP_INET_EGRESS:
2785                 return BPF_PROG_TYPE_CGROUP_SKB;
2787         case BPF_CGROUP_INET_SOCK_CREATE:
2788         case BPF_CGROUP_INET_SOCK_RELEASE:
2789         case BPF_CGROUP_INET4_POST_BIND:
2790         case BPF_CGROUP_INET6_POST_BIND:
2791                 return BPF_PROG_TYPE_CGROUP_SOCK;
2792         case BPF_CGROUP_INET4_BIND:
2793         case BPF_CGROUP_INET6_BIND:
2794         case BPF_CGROUP_INET4_CONNECT:
2795         case BPF_CGROUP_INET6_CONNECT:
2796         case BPF_CGROUP_INET4_GETPEERNAME:
2797         case BPF_CGROUP_INET6_GETPEERNAME:
2798         case BPF_CGROUP_INET4_GETSOCKNAME:
2799         case BPF_CGROUP_INET6_GETSOCKNAME:
2800         case BPF_CGROUP_UDP4_SENDMSG:
2801         case BPF_CGROUP_UDP6_SENDMSG:
2802         case BPF_CGROUP_UDP4_RECVMSG:
2803         case BPF_CGROUP_UDP6_RECVMSG:
2804                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2805         case BPF_CGROUP_SOCK_OPS:
2806                 return BPF_PROG_TYPE_SOCK_OPS;
2807         case BPF_CGROUP_DEVICE:
2808                 return BPF_PROG_TYPE_CGROUP_DEVICE;
2809         case BPF_SK_MSG_VERDICT:
2810                 return BPF_PROG_TYPE_SK_MSG;
2811         case BPF_SK_SKB_STREAM_PARSER:
2812         case BPF_SK_SKB_STREAM_VERDICT:
2813                 return BPF_PROG_TYPE_SK_SKB;
2814         case BPF_LIRC_MODE2:
2815                 return BPF_PROG_TYPE_LIRC_MODE2;
2816         case BPF_FLOW_DISSECTOR:
2817                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
2818         case BPF_CGROUP_SYSCTL:
2819                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
2820         case BPF_CGROUP_GETSOCKOPT:
2821         case BPF_CGROUP_SETSOCKOPT:
2822                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2823         case BPF_TRACE_ITER:
2824                 return BPF_PROG_TYPE_TRACING;
2825         case BPF_SK_LOOKUP:
2826                 return BPF_PROG_TYPE_SK_LOOKUP;
2827         case BPF_XDP:
2828                 return BPF_PROG_TYPE_XDP;
2829         default:
2830                 return BPF_PROG_TYPE_UNSPEC;
2831         }
2832 }
2833
2834 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2835
2836 #define BPF_F_ATTACH_MASK \
2837         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2838
2839 static int bpf_prog_attach(const union bpf_attr *attr)
2840 {
2841         enum bpf_prog_type ptype;
2842         struct bpf_prog *prog;
2843         int ret;
2844
2845         if (CHECK_ATTR(BPF_PROG_ATTACH))
2846                 return -EINVAL;
2847
2848         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2849                 return -EINVAL;
2850
2851         ptype = attach_type_to_prog_type(attr->attach_type);
2852         if (ptype == BPF_PROG_TYPE_UNSPEC)
2853                 return -EINVAL;
2854
2855         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2856         if (IS_ERR(prog))
2857                 return PTR_ERR(prog);
2858
2859         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2860                 bpf_prog_put(prog);
2861                 return -EINVAL;
2862         }
2863
2864         switch (ptype) {
2865         case BPF_PROG_TYPE_SK_SKB:
2866         case BPF_PROG_TYPE_SK_MSG:
2867                 ret = sock_map_get_from_fd(attr, prog);
2868                 break;
2869         case BPF_PROG_TYPE_LIRC_MODE2:
2870                 ret = lirc_prog_attach(attr, prog);
2871                 break;
2872         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2873                 ret = netns_bpf_prog_attach(attr, prog);
2874                 break;
2875         case BPF_PROG_TYPE_CGROUP_DEVICE:
2876         case BPF_PROG_TYPE_CGROUP_SKB:
2877         case BPF_PROG_TYPE_CGROUP_SOCK:
2878         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2879         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2880         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2881         case BPF_PROG_TYPE_SOCK_OPS:
2882                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
2883                 break;
2884         default:
2885                 ret = -EINVAL;
2886         }
2887
2888         if (ret)
2889                 bpf_prog_put(prog);
2890         return ret;
2891 }
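
/* Illustrative user-space sketch (editorial addition, not kernel code):
 * attaching a cgroup skb program; cgroup_fd is an assumed open cgroup
 * directory FD carried in the target_fd field of union bpf_attr (consumed by
 * cgroup_bpf_prog_attach(), not validated in this function):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */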
2892
2893 #define BPF_PROG_DETACH_LAST_FIELD attach_type
2894
2895 static int bpf_prog_detach(const union bpf_attr *attr)
2896 {
2897         enum bpf_prog_type ptype;
2898
2899         if (CHECK_ATTR(BPF_PROG_DETACH))
2900                 return -EINVAL;
2901
2902         ptype = attach_type_to_prog_type(attr->attach_type);
2903
2904         switch (ptype) {
2905         case BPF_PROG_TYPE_SK_MSG:
2906         case BPF_PROG_TYPE_SK_SKB:
2907                 return sock_map_prog_detach(attr, ptype);
2908         case BPF_PROG_TYPE_LIRC_MODE2:
2909                 return lirc_prog_detach(attr);
2910         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2911                 return netns_bpf_prog_detach(attr, ptype);
2912         case BPF_PROG_TYPE_CGROUP_DEVICE:
2913         case BPF_PROG_TYPE_CGROUP_SKB:
2914         case BPF_PROG_TYPE_CGROUP_SOCK:
2915         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2916         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2917         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2918         case BPF_PROG_TYPE_SOCK_OPS:
2919                 return cgroup_bpf_prog_detach(attr, ptype);
2920         default:
2921                 return -EINVAL;
2922         }
2923 }
2924
2925 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2926
2927 static int bpf_prog_query(const union bpf_attr *attr,
2928                           union bpf_attr __user *uattr)
2929 {
2930         if (!capable(CAP_NET_ADMIN))
2931                 return -EPERM;
2932         if (CHECK_ATTR(BPF_PROG_QUERY))
2933                 return -EINVAL;
2934         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2935                 return -EINVAL;
2936
2937         switch (attr->query.attach_type) {
2938         case BPF_CGROUP_INET_INGRESS:
2939         case BPF_CGROUP_INET_EGRESS:
2940         case BPF_CGROUP_INET_SOCK_CREATE:
2941         case BPF_CGROUP_INET_SOCK_RELEASE:
2942         case BPF_CGROUP_INET4_BIND:
2943         case BPF_CGROUP_INET6_BIND:
2944         case BPF_CGROUP_INET4_POST_BIND:
2945         case BPF_CGROUP_INET6_POST_BIND:
2946         case BPF_CGROUP_INET4_CONNECT:
2947         case BPF_CGROUP_INET6_CONNECT:
2948         case BPF_CGROUP_INET4_GETPEERNAME:
2949         case BPF_CGROUP_INET6_GETPEERNAME:
2950         case BPF_CGROUP_INET4_GETSOCKNAME:
2951         case BPF_CGROUP_INET6_GETSOCKNAME:
2952         case BPF_CGROUP_UDP4_SENDMSG:
2953         case BPF_CGROUP_UDP6_SENDMSG:
2954         case BPF_CGROUP_UDP4_RECVMSG:
2955         case BPF_CGROUP_UDP6_RECVMSG:
2956         case BPF_CGROUP_SOCK_OPS:
2957         case BPF_CGROUP_DEVICE:
2958         case BPF_CGROUP_SYSCTL:
2959         case BPF_CGROUP_GETSOCKOPT:
2960         case BPF_CGROUP_SETSOCKOPT:
2961                 return cgroup_bpf_prog_query(attr, uattr);
2962         case BPF_LIRC_MODE2:
2963                 return lirc_prog_query(attr, uattr);
2964         case BPF_FLOW_DISSECTOR:
2965         case BPF_SK_LOOKUP:
2966                 return netns_bpf_prog_query(attr, uattr);
2967         default:
2968                 return -EINVAL;
2969         }
2970 }
2971
2972 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2973
2974 static int bpf_prog_test_run(const union bpf_attr *attr,
2975                              union bpf_attr __user *uattr)
2976 {
2977         struct bpf_prog *prog;
2978         int ret = -ENOTSUPP;
2979
2980         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2981                 return -EINVAL;
2982
2983         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2984             (!attr->test.ctx_size_in && attr->test.ctx_in))
2985                 return -EINVAL;
2986
2987         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2988             (!attr->test.ctx_size_out && attr->test.ctx_out))
2989                 return -EINVAL;
2990
2991         prog = bpf_prog_get(attr->test.prog_fd);
2992         if (IS_ERR(prog))
2993                 return PTR_ERR(prog);
2994
2995         if (prog->aux->ops->test_run)
2996                 ret = prog->aux->ops->test_run(prog, attr, uattr);
2997
2998         bpf_prog_put(prog);
2999         return ret;
3000 }
3001
3002 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3003
3004 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3005                                union bpf_attr __user *uattr,
3006                                struct idr *idr,
3007                                spinlock_t *lock)
3008 {
3009         u32 next_id = attr->start_id;
3010         int err = 0;
3011
3012         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3013                 return -EINVAL;
3014
3015         if (!capable(CAP_SYS_ADMIN))
3016                 return -EPERM;
3017
3018         next_id++;
3019         spin_lock_bh(lock);
3020         if (!idr_get_next(idr, &next_id))
3021                 err = -ENOENT;
3022         spin_unlock_bh(lock);
3023
3024         if (!err)
3025                 err = put_user(next_id, &uattr->next_id);
3026
3027         return err;
3028 }
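
/* Illustrative user-space sketch (editorial addition, not kernel code):
 * walking all loaded program IDs with the helper above; the loop ends when
 * the kernel finds no higher ID and fails with ENOENT:
 *
 *	union bpf_attr attr = {};
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		printf("prog id %u\n", attr.next_id);
 *		attr.start_id = attr.next_id;
 *	}
 */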
3029
3030 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3031 {
3032         struct bpf_map *map;
3033
3034         spin_lock_bh(&map_idr_lock);
3035 again:
3036         map = idr_get_next(&map_idr, id);
3037         if (map) {
3038                 map = __bpf_map_inc_not_zero(map, false);
3039                 if (IS_ERR(map)) {
3040                         (*id)++;
3041                         goto again;
3042                 }
3043         }
3044         spin_unlock_bh(&map_idr_lock);
3045
3046         return map;
3047 }
3048
3049 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3050 {
3051         struct bpf_prog *prog;
3052
3053         spin_lock_bh(&prog_idr_lock);
3054 again:
3055         prog = idr_get_next(&prog_idr, id);
3056         if (prog) {
3057                 prog = bpf_prog_inc_not_zero(prog);
3058                 if (IS_ERR(prog)) {
3059                         (*id)++;
3060                         goto again;
3061                 }
3062         }
3063         spin_unlock_bh(&prog_idr_lock);
3064
3065         return prog;
3066 }
3067
3068 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3069
3070 struct bpf_prog *bpf_prog_by_id(u32 id)
3071 {
3072         struct bpf_prog *prog;
3073
3074         if (!id)
3075                 return ERR_PTR(-ENOENT);
3076
3077         spin_lock_bh(&prog_idr_lock);
3078         prog = idr_find(&prog_idr, id);
3079         if (prog)
3080                 prog = bpf_prog_inc_not_zero(prog);
3081         else
3082                 prog = ERR_PTR(-ENOENT);
3083         spin_unlock_bh(&prog_idr_lock);
3084         return prog;
3085 }
3086
3087 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3088 {
3089         struct bpf_prog *prog;
3090         u32 id = attr->prog_id;
3091         int fd;
3092
3093         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3094                 return -EINVAL;
3095
3096         if (!capable(CAP_SYS_ADMIN))
3097                 return -EPERM;
3098
3099         prog = bpf_prog_by_id(id);
3100         if (IS_ERR(prog))
3101                 return PTR_ERR(prog);
3102
3103         fd = bpf_prog_new_fd(prog);
3104         if (fd < 0)
3105                 bpf_prog_put(prog);
3106
3107         return fd;
3108 }
3109
3110 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3111
3112 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3113 {
3114         struct bpf_map *map;
3115         u32 id = attr->map_id;
3116         int f_flags;
3117         int fd;
3118
3119         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3120             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3121                 return -EINVAL;
3122
3123         if (!capable(CAP_SYS_ADMIN))
3124                 return -EPERM;
3125
3126         f_flags = bpf_get_file_flag(attr->open_flags);
3127         if (f_flags < 0)
3128                 return f_flags;
3129
3130         spin_lock_bh(&map_idr_lock);
3131         map = idr_find(&map_idr, id);
3132         if (map)
3133                 map = __bpf_map_inc_not_zero(map, true);
3134         else
3135                 map = ERR_PTR(-ENOENT);
3136         spin_unlock_bh(&map_idr_lock);
3137
3138         if (IS_ERR(map))
3139                 return PTR_ERR(map);
3140
3141         fd = bpf_map_new_fd(map, f_flags);
3142         if (fd < 0)
3143                 bpf_map_put_with_uref(map);
3144
3145         return fd;
3146 }
3147
3148 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3149                                               unsigned long addr, u32 *off,
3150                                               u32 *type)
3151 {
3152         const struct bpf_map *map;
3153         int i;
3154
3155         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3156                 map = prog->aux->used_maps[i];
3157                 if (map == (void *)addr) {
3158                         *type = BPF_PSEUDO_MAP_FD;
3159                         return map;
3160                 }
3161                 if (!map->ops->map_direct_value_meta)
3162                         continue;
3163                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3164                         *type = BPF_PSEUDO_MAP_VALUE;
3165                         return map;
3166                 }
3167         }
3168
3169         return NULL;
3170 }
3171
3172 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3173                                               const struct cred *f_cred)
3174 {
3175         const struct bpf_map *map;
3176         struct bpf_insn *insns;
3177         u32 off, type;
3178         u64 imm;
3179         u8 code;
3180         int i;
3181
3182         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3183                         GFP_USER);
3184         if (!insns)
3185                 return insns;
3186
3187         for (i = 0; i < prog->len; i++) {
3188                 code = insns[i].code;
3189
3190                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3191                         insns[i].code = BPF_JMP | BPF_CALL;
3192                         insns[i].imm = BPF_FUNC_tail_call;
3193                         /* fall-through */
3194                 }
3195                 if (code == (BPF_JMP | BPF_CALL) ||
3196                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3197                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3198                                 insns[i].code = BPF_JMP | BPF_CALL;
3199                         if (!bpf_dump_raw_ok(f_cred))
3200                                 insns[i].imm = 0;
3201                         continue;
3202                 }
3203                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3204                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3205                         continue;
3206                 }
3207
3208                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3209                         continue;
3210
3211                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3212                 map = bpf_map_from_imm(prog, imm, &off, &type);
3213                 if (map) {
3214                         insns[i].src_reg = type;
3215                         insns[i].imm = map->id;
3216                         insns[i + 1].imm = off;
3217                         continue;
3218                 }
3219         }
3220
3221         return insns;
3222 }
3223
3224 static int set_info_rec_size(struct bpf_prog_info *info)
3225 {
3226         /*
3227          * Ensure each info.*_rec_size is the same as the record size
3228          * the kernel expects.  A zero *_rec_size is allowed only when
3229          * the matching nr_* count is also zero; in that case the
3230          * kernel writes the expected *_rec_size back into info for
3231          * user space to pick up.
3232          */
3235
3236         if ((info->nr_func_info || info->func_info_rec_size) &&
3237             info->func_info_rec_size != sizeof(struct bpf_func_info))
3238                 return -EINVAL;
3239
3240         if ((info->nr_line_info || info->line_info_rec_size) &&
3241             info->line_info_rec_size != sizeof(struct bpf_line_info))
3242                 return -EINVAL;
3243
3244         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3245             info->jited_line_info_rec_size != sizeof(__u64))
3246                 return -EINVAL;
3247
3248         info->func_info_rec_size = sizeof(struct bpf_func_info);
3249         info->line_info_rec_size = sizeof(struct bpf_line_info);
3250         info->jited_line_info_rec_size = sizeof(__u64);
3251
3252         return 0;
3253 }
3254
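/* Fill a struct bpf_prog_info for BPF_OBJ_GET_INFO_BY_FD.  For each
 * variable-size array the full count is always reported, but data is
 * copied out only into buffers the caller supplied, so user space can
 * retry with larger buffers.  Callers without bpf_capable() get all
 * dump-related lengths and counts zeroed, and fields that would expose
 * kernel addresses are cleared unless the opener's credentials pass
 * bpf_dump_raw_ok().
 */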
3255 static int bpf_prog_get_info_by_fd(struct file *file,
3256                                    struct bpf_prog *prog,
3257                                    const union bpf_attr *attr,
3258                                    union bpf_attr __user *uattr)
3259 {
3260         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3261         struct bpf_prog_info info;
3262         u32 info_len = attr->info.info_len;
3263         struct bpf_prog_stats stats;
3264         char __user *uinsns;
3265         u32 ulen;
3266         int err;
3267
3268         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3269         if (err)
3270                 return err;
3271         info_len = min_t(u32, sizeof(info), info_len);
3272
3273         memset(&info, 0, sizeof(info));
3274         if (copy_from_user(&info, uinfo, info_len))
3275                 return -EFAULT;
3276
3277         info.type = prog->type;
3278         info.id = prog->aux->id;
3279         info.load_time = prog->aux->load_time;
3280         info.created_by_uid = from_kuid_munged(current_user_ns(),
3281                                                prog->aux->user->uid);
3282         info.gpl_compatible = prog->gpl_compatible;
3283
3284         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3285         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3286
3287         ulen = info.nr_map_ids;
3288         info.nr_map_ids = prog->aux->used_map_cnt;
3289         ulen = min_t(u32, info.nr_map_ids, ulen);
3290         if (ulen) {
3291                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3292                 u32 i;
3293
3294                 for (i = 0; i < ulen; i++)
3295                         if (put_user(prog->aux->used_maps[i]->id,
3296                                      &user_map_ids[i]))
3297                                 return -EFAULT;
3298         }
3299
3300         err = set_info_rec_size(&info);
3301         if (err)
3302                 return err;
3303
3304         bpf_prog_get_stats(prog, &stats);
3305         info.run_time_ns = stats.nsecs;
3306         info.run_cnt = stats.cnt;
3307
3308         if (!bpf_capable()) {
3309                 info.jited_prog_len = 0;
3310                 info.xlated_prog_len = 0;
3311                 info.nr_jited_ksyms = 0;
3312                 info.nr_jited_func_lens = 0;
3313                 info.nr_func_info = 0;
3314                 info.nr_line_info = 0;
3315                 info.nr_jited_line_info = 0;
3316                 goto done;
3317         }
3318
3319         ulen = info.xlated_prog_len;
3320         info.xlated_prog_len = bpf_prog_insn_size(prog);
3321         if (info.xlated_prog_len && ulen) {
3322                 struct bpf_insn *insns_sanitized;
3323                 bool fault;
3324
3325                 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3326                         info.xlated_prog_insns = 0;
3327                         goto done;
3328                 }
3329                 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3330                 if (!insns_sanitized)
3331                         return -ENOMEM;
3332                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3333                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3334                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3335                 kfree(insns_sanitized);
3336                 if (fault)
3337                         return -EFAULT;
3338         }
3339
3340         if (bpf_prog_is_dev_bound(prog->aux)) {
3341                 err = bpf_prog_offload_info_fill(&info, prog);
3342                 if (err)
3343                         return err;
3344                 goto done;
3345         }
3346
3347         /* NOTE: the code below is skipped for device-bound programs,
3348          * which branched to done above; bpf_prog_offload_info_fill()
3349          * is where the equivalent fields are filled in for offload.
3350          */
3351         ulen = info.jited_prog_len;
3352         if (prog->aux->func_cnt) {
3353                 u32 i;
3354
3355                 info.jited_prog_len = 0;
3356                 for (i = 0; i < prog->aux->func_cnt; i++)
3357                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3358         } else {
3359                 info.jited_prog_len = prog->jited_len;
3360         }
3361
3362         if (info.jited_prog_len && ulen) {
3363                 if (bpf_dump_raw_ok(file->f_cred)) {
3364                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3365                         ulen = min_t(u32, info.jited_prog_len, ulen);
3366
3367                         /* for multi-function programs, copy the JITed
3368                          * instructions for all the functions
3369                          */
3370                         if (prog->aux->func_cnt) {
3371                                 u32 len, free, i;
3372                                 u8 *img;
3373
3374                                 free = ulen;
3375                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3376                                         len = prog->aux->func[i]->jited_len;
3377                                         len = min_t(u32, len, free);
3378                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3379                                         if (copy_to_user(uinsns, img, len))
3380                                                 return -EFAULT;
3381                                         uinsns += len;
3382                                         free -= len;
3383                                         if (!free)
3384                                                 break;
3385                                 }
3386                         } else {
3387                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3388                                         return -EFAULT;
3389                         }
3390                 } else {
3391                         info.jited_prog_insns = 0;
3392                 }
3393         }
3394
3395         ulen = info.nr_jited_ksyms;
3396         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3397         if (ulen) {
3398                 if (bpf_dump_raw_ok(file->f_cred)) {
3399                         unsigned long ksym_addr;
3400                         u64 __user *user_ksyms;
3401                         u32 i;
3402
3403                         /* copy the address of the kernel symbol
3404                          * corresponding to each function
3405                          */
3406                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3407                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3408                         if (prog->aux->func_cnt) {
3409                                 for (i = 0; i < ulen; i++) {
3410                                         ksym_addr = (unsigned long)
3411                                                 prog->aux->func[i]->bpf_func;
3412                                         if (put_user((u64) ksym_addr,
3413                                                      &user_ksyms[i]))
3414                                                 return -EFAULT;
3415                                 }
3416                         } else {
3417                                 ksym_addr = (unsigned long) prog->bpf_func;
3418                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3419                                         return -EFAULT;
3420                         }
3421                 } else {
3422                         info.jited_ksyms = 0;
3423                 }
3424         }
3425
3426         ulen = info.nr_jited_func_lens;
3427         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3428         if (ulen) {
3429                 if (bpf_dump_raw_ok(file->f_cred)) {
3430                         u32 __user *user_lens;
3431                         u32 func_len, i;
3432
3433                         /* copy the JITed image lengths for each function */
3434                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3435                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3436                         if (prog->aux->func_cnt) {
3437                                 for (i = 0; i < ulen; i++) {
3438                                         func_len =
3439                                                 prog->aux->func[i]->jited_len;
3440                                         if (put_user(func_len, &user_lens[i]))
3441                                                 return -EFAULT;
3442                                 }
3443                         } else {
3444                                 func_len = prog->jited_len;
3445                                 if (put_user(func_len, &user_lens[0]))
3446                                         return -EFAULT;
3447                         }
3448                 } else {
3449                         info.jited_func_lens = 0;
3450                 }
3451         }
3452
3453         if (prog->aux->btf)
3454                 info.btf_id = btf_id(prog->aux->btf);
3455
3456         ulen = info.nr_func_info;
3457         info.nr_func_info = prog->aux->func_info_cnt;
3458         if (info.nr_func_info && ulen) {
3459                 char __user *user_finfo;
3460
3461                 user_finfo = u64_to_user_ptr(info.func_info);
3462                 ulen = min_t(u32, info.nr_func_info, ulen);
3463                 if (copy_to_user(user_finfo, prog->aux->func_info,
3464                                  info.func_info_rec_size * ulen))
3465                         return -EFAULT;
3466         }
3467
3468         ulen = info.nr_line_info;
3469         info.nr_line_info = prog->aux->nr_linfo;
3470         if (info.nr_line_info && ulen) {
3471                 __u8 __user *user_linfo;
3472
3473                 user_linfo = u64_to_user_ptr(info.line_info);
3474                 ulen = min_t(u32, info.nr_line_info, ulen);
3475                 if (copy_to_user(user_linfo, prog->aux->linfo,
3476                                  info.line_info_rec_size * ulen))
3477                         return -EFAULT;
3478         }
3479
3480         ulen = info.nr_jited_line_info;
3481         if (prog->aux->jited_linfo)
3482                 info.nr_jited_line_info = prog->aux->nr_linfo;
3483         else
3484                 info.nr_jited_line_info = 0;
3485         if (info.nr_jited_line_info && ulen) {
3486                 if (bpf_dump_raw_ok(file->f_cred)) {
3487                         __u64 __user *user_linfo;
3488                         u32 i;
3489
3490                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3491                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3492                         for (i = 0; i < ulen; i++) {
3493                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3494                                              &user_linfo[i]))
3495                                         return -EFAULT;
3496                         }
3497                 } else {
3498                         info.jited_line_info = 0;
3499                 }
3500         }
3501
3502         ulen = info.nr_prog_tags;
3503         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3504         if (ulen) {
3505                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3506                 u32 i;
3507
3508                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3509                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3510                 if (prog->aux->func_cnt) {
3511                         for (i = 0; i < ulen; i++) {
3512                                 if (copy_to_user(user_prog_tags[i],
3513                                                  prog->aux->func[i]->tag,
3514                                                  BPF_TAG_SIZE))
3515                                         return -EFAULT;
3516                         }
3517                 } else {
3518                         if (copy_to_user(user_prog_tags[0],
3519                                          prog->tag, BPF_TAG_SIZE))
3520                                 return -EFAULT;
3521                 }
3522         }
3523
3524 done:
3525         if (copy_to_user(uinfo, &info, info_len) ||
3526             put_user(info_len, &uattr->info.info_len))
3527                 return -EFAULT;
3528
3529         return 0;
3530 }
3531
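/* Fill a struct bpf_map_info for BPF_OBJ_GET_INFO_BY_FD.  Unlike the prog
 * counterpart this is all fixed-size fields, plus BTF type IDs when the
 * map carries BTF and extra details for device-bound (offloaded) maps.
 */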
3532 static int bpf_map_get_info_by_fd(struct file *file,
3533                                   struct bpf_map *map,
3534                                   const union bpf_attr *attr,
3535                                   union bpf_attr __user *uattr)
3536 {
3537         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3538         struct bpf_map_info info;
3539         u32 info_len = attr->info.info_len;
3540         int err;
3541
3542         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3543         if (err)
3544                 return err;
3545         info_len = min_t(u32, sizeof(info), info_len);
3546
3547         memset(&info, 0, sizeof(info));
3548         info.type = map->map_type;
3549         info.id = map->id;
3550         info.key_size = map->key_size;
3551         info.value_size = map->value_size;
3552         info.max_entries = map->max_entries;
3553         info.map_flags = map->map_flags;
3554         memcpy(info.name, map->name, sizeof(map->name));
3555
3556         if (map->btf) {
3557                 info.btf_id = btf_id(map->btf);
3558                 info.btf_key_type_id = map->btf_key_type_id;
3559                 info.btf_value_type_id = map->btf_value_type_id;
3560         }
3561         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3562
3563         if (bpf_map_is_dev_bound(map)) {
3564                 err = bpf_map_offload_info_fill(&info, map);
3565                 if (err)
3566                         return err;
3567         }
3568
3569         if (copy_to_user(uinfo, &info, info_len) ||
3570             put_user(info_len, &uattr->info.info_len))
3571                 return -EFAULT;
3572
3573         return 0;
3574 }
3575
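/* BTF objects only need the common tail-zero check here; the actual info
 * filling is delegated to btf_get_info_by_fd().
 */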
3576 static int bpf_btf_get_info_by_fd(struct file *file,
3577                                   struct btf *btf,
3578                                   const union bpf_attr *attr,
3579                                   union bpf_attr __user *uattr)
3580 {
3581         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3582         u32 info_len = attr->info.info_len;
3583         int err;
3584
3585         err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3586         if (err)
3587                 return err;
3588
3589         return btf_get_info_by_fd(btf, attr, uattr);
3590 }
3591
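/* Fill a struct bpf_link_info for BPF_OBJ_GET_INFO_BY_FD.  The user's info
 * is copied in first because some link types accept user-provided buffer
 * pointers inside it; type-specific fields are then filled via the link's
 * fill_link_info callback, if it provides one.
 */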
3592 static int bpf_link_get_info_by_fd(struct file *file,
3593                                    struct bpf_link *link,
3594                                    const union bpf_attr *attr,
3595                                    union bpf_attr __user *uattr)
3596 {
3597         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3598         struct bpf_link_info info;
3599         u32 info_len = attr->info.info_len;
3600         int err;
3601
3602         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3603         if (err)
3604                 return err;
3605         info_len = min_t(u32, sizeof(info), info_len);
3606
3607         memset(&info, 0, sizeof(info));
3608         if (copy_from_user(&info, uinfo, info_len))
3609                 return -EFAULT;
3610
3611         info.type = link->type;
3612         info.id = link->id;
3613         info.prog_id = link->prog->aux->id;
3614
3615         if (link->ops->fill_link_info) {
3616                 err = link->ops->fill_link_info(link, &info);
3617                 if (err)
3618                         return err;
3619         }
3620
3621         if (copy_to_user(uinfo, &info, info_len) ||
3622             put_user(info_len, &uattr->info.info_len))
3623                 return -EFAULT;
3624
3625         return 0;
3626 }
3627
3629 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3630
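/* Dispatch BPF_OBJ_GET_INFO_BY_FD according to which kind of BPF object
 * (prog, map, BTF or link) the given fd refers to, identified by the
 * file's file_operations.
 */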
3631 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3632                                   union bpf_attr __user *uattr)
3633 {
3634         int ufd = attr->info.bpf_fd;
3635         struct fd f;
3636         int err;
3637
3638         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3639                 return -EINVAL;
3640
3641         f = fdget(ufd);
3642         if (!f.file)
3643                 return -EBADFD;
3644
3645         if (f.file->f_op == &bpf_prog_fops)
3646                 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3647                                               uattr);
3648         else if (f.file->f_op == &bpf_map_fops)
3649                 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3650                                              uattr);
3651         else if (f.file->f_op == &btf_fops)
3652                 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3653         else if (f.file->f_op == &bpf_link_fops)
3654                 err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3655                                               attr, uattr);
3656         else
3657                 err = -EINVAL;
3658
3659         fdput(f);
3660         return err;
3661 }
3662
3663 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3664
3665 static int bpf_btf_load(const union bpf_attr *attr)
3666 {
3667         if (CHECK_ATTR(BPF_BTF_LOAD))
3668                 return -EINVAL;
3669
3670         if (!bpf_capable())
3671                 return -EPERM;
3672
3673         return btf_new_fd(attr);
3674 }
3675
3676 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3677
3678 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3679 {
3680         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3681                 return -EINVAL;
3682
3683         if (!capable(CAP_SYS_ADMIN))
3684                 return -EPERM;
3685
3686         return btf_get_fd_by_id(attr->btf_id);
3687 }
3688
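/* Copy BPF_TASK_FD_QUERY results back to user space.  The full string
 * length is always reported in buf_len; if the supplied buffer is too
 * small, a NUL-terminated prefix is copied out and -ENOSPC is returned.
 */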
3689 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3690                                   union bpf_attr __user *uattr,
3691                                   u32 prog_id, u32 fd_type,
3692                                   const char *buf, u64 probe_offset,
3693                                   u64 probe_addr)
3694 {
3695         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3696         u32 len = buf ? strlen(buf) : 0, input_len;
3697         int err = 0;
3698
3699         if (put_user(len, &uattr->task_fd_query.buf_len))
3700                 return -EFAULT;
3701         input_len = attr->task_fd_query.buf_len;
3702         if (input_len && ubuf) {
3703                 if (!len) {
3704                         /* nothing to copy, just make ubuf NULL terminated */
3705                         char zero = '\0';
3706
3707                         if (put_user(zero, ubuf))
3708                                 return -EFAULT;
3709                 } else if (input_len >= len + 1) {
3710                         /* ubuf can hold the string with NULL terminator */
3711                         if (copy_to_user(ubuf, buf, len + 1))
3712                                 return -EFAULT;
3713                 } else {
3714                         /* ubuf cannot hold the full string; do a partial
3715                          * copy that is still NUL-terminated.
3716                          */
3717                         char zero = '\0';
3718
3719                         err = -ENOSPC;
3720                         if (copy_to_user(ubuf, buf, input_len - 1))
3721                                 return -EFAULT;
3722                         if (put_user(zero, ubuf + input_len - 1))
3723                                 return -EFAULT;
3724                 }
3725         }
3726
3727         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3728             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3729             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3730             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3731                 return -EFAULT;
3732
3733         return err;
3734 }
3735
3736 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3737
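/* BPF_TASK_FD_QUERY: given a pid and an fd number within that task,
 * report which BPF program the fd is attached to.  Raw tracepoint links
 * and perf events (kprobe, uprobe, tracepoint) are supported; any other
 * fd type yields -ENOTSUPP.
 */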
3738 static int bpf_task_fd_query(const union bpf_attr *attr,
3739                              union bpf_attr __user *uattr)
3740 {
3741         pid_t pid = attr->task_fd_query.pid;
3742         u32 fd = attr->task_fd_query.fd;
3743         const struct perf_event *event;
3744         struct files_struct *files;
3745         struct task_struct *task;
3746         struct file *file;
3747         int err;
3748
3749         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3750                 return -EINVAL;
3751
3752         if (!capable(CAP_SYS_ADMIN))
3753                 return -EPERM;
3754
3755         if (attr->task_fd_query.flags != 0)
3756                 return -EINVAL;
3757
3758         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3759         if (!task)
3760                 return -ENOENT;
3761
3762         files = get_files_struct(task);
3763         put_task_struct(task);
3764         if (!files)
3765                 return -ENOENT;
3766
3767         err = 0;
3768         spin_lock(&files->file_lock);
3769         file = fcheck_files(files, fd);
3770         if (!file)
3771                 err = -EBADF;
3772         else
3773                 get_file(file);
3774         spin_unlock(&files->file_lock);
3775         put_files_struct(files);
3776
3777         if (err)
3778                 goto out;
3779
3780         if (file->f_op == &bpf_link_fops) {
3781                 struct bpf_link *link = file->private_data;
3782
3783                 if (link->ops == &bpf_raw_tp_link_lops) {
3784                         struct bpf_raw_tp_link *raw_tp =
3785                                 container_of(link, struct bpf_raw_tp_link, link);
3786                         struct bpf_raw_event_map *btp = raw_tp->btp;
3787
3788                         err = bpf_task_fd_query_copy(attr, uattr,
3789                                                      raw_tp->link.prog->aux->id,
3790                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
3791                                                      btp->tp->name, 0, 0);
3792                         goto put_file;
3793                 }
3794                 goto out_not_supp;
3795         }
3796
3797         event = perf_get_event(file);
3798         if (!IS_ERR(event)) {
3799                 u64 probe_offset, probe_addr;
3800                 u32 prog_id, fd_type;
3801                 const char *buf;
3802
3803                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3804                                               &buf, &probe_offset,
3805                                               &probe_addr);
3806                 if (!err)
3807                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3808                                                      fd_type, buf,
3809                                                      probe_offset,
3810                                                      probe_addr);
3811                 goto put_file;
3812         }
3813
3814 out_not_supp:
3815         err = -ENOTSUPP;
3816 put_file:
3817         fput(file);
3818 out:
3819         return err;
3820 }
3821
3822 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3823
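/* Expands to a single statement so it can sit in the if/else chain in
 * bpf_map_do_batch(); it relies on 'map', 'attr', 'uattr' and 'err'
 * being in scope at the expansion site.
 */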
3824 #define BPF_DO_BATCH(fn)                        \
3825         do {                                    \
3826                 if (!fn) {                      \
3827                         err = -ENOTSUPP;        \
3828                         goto err_put;           \
3829                 }                               \
3830                 err = fn(map, attr, uattr);     \
3831         } while (0)
3832
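/* Common entry point for the four batched map commands: check that the
 * map fd grants the FMODE_CAN_READ/FMODE_CAN_WRITE access the requested
 * command needs, then dispatch to the map type's batch callback if it
 * implements one.
 */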
3833 static int bpf_map_do_batch(const union bpf_attr *attr,
3834                             union bpf_attr __user *uattr,
3835                             int cmd)
3836 {
3837         struct bpf_map *map;
3838         int err, ufd;
3839         struct fd f;
3840
3841         if (CHECK_ATTR(BPF_MAP_BATCH))
3842                 return -EINVAL;
3843
3844         ufd = attr->batch.map_fd;
3845         f = fdget(ufd);
3846         map = __bpf_map_get(f);
3847         if (IS_ERR(map))
3848                 return PTR_ERR(map);
3849
3850         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3851              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3852             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3853                 err = -EPERM;
3854                 goto err_put;
3855         }
3856
3857         if (cmd != BPF_MAP_LOOKUP_BATCH &&
3858             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3859                 err = -EPERM;
3860                 goto err_put;
3861         }
3862
3863         if (cmd == BPF_MAP_LOOKUP_BATCH)
3864                 BPF_DO_BATCH(map->ops->map_lookup_batch);
3865         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3866                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3867         else if (cmd == BPF_MAP_UPDATE_BATCH)
3868                 BPF_DO_BATCH(map->ops->map_update_batch);
3869         else
3870                 BPF_DO_BATCH(map->ops->map_delete_batch);
3871
3872 err_put:
3873         fdput(f);
3874         return err;
3875 }
3876
3877 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3878 {
3879         if (attr->link_create.attach_type == BPF_TRACE_ITER &&
3880             prog->expected_attach_type == BPF_TRACE_ITER)
3881                 return bpf_iter_link_attach(attr, prog);
3882
3883         return -EINVAL;
3884 }
3885
3886 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
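/* BPF_LINK_CREATE: attach an already loaded program to a hook (cgroup,
 * tracing iterator, netns or XDP) and return a new fd representing the
 * attachment; dropping the last reference to the link detaches it.
 */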
3887 static int link_create(union bpf_attr *attr)
3888 {
3889         enum bpf_prog_type ptype;
3890         struct bpf_prog *prog;
3891         int ret;
3892
3893         if (CHECK_ATTR(BPF_LINK_CREATE))
3894                 return -EINVAL;
3895
3896         ptype = attach_type_to_prog_type(attr->link_create.attach_type);
3897         if (ptype == BPF_PROG_TYPE_UNSPEC)
3898                 return -EINVAL;
3899
3900         prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
3901         if (IS_ERR(prog))
3902                 return PTR_ERR(prog);
3903
3904         ret = bpf_prog_attach_check_attach_type(prog,
3905                                                 attr->link_create.attach_type);
3906         if (ret)
3907                 goto err_out;
3908
3909         switch (ptype) {
3910         case BPF_PROG_TYPE_CGROUP_SKB:
3911         case BPF_PROG_TYPE_CGROUP_SOCK:
3912         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3913         case BPF_PROG_TYPE_SOCK_OPS:
3914         case BPF_PROG_TYPE_CGROUP_DEVICE:
3915         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3916         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3917                 ret = cgroup_bpf_link_attach(attr, prog);
3918                 break;
3919         case BPF_PROG_TYPE_TRACING:
3920                 ret = tracing_bpf_link_attach(attr, prog);
3921                 break;
3922         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3923         case BPF_PROG_TYPE_SK_LOOKUP:
3924                 ret = netns_bpf_link_create(attr, prog);
3925                 break;
3926 #ifdef CONFIG_NET
3927         case BPF_PROG_TYPE_XDP:
3928                 ret = bpf_xdp_link_attach(attr, prog);
3929                 break;
3930 #endif
3931         default:
3932                 ret = -EINVAL;
3933         }
3934
3935 err_out:
3936         if (ret < 0)
3937                 bpf_prog_put(prog);
3938         return ret;
3939 }
3940
3941 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
3942
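/* BPF_LINK_UPDATE: atomically swap the program a link is attached to.
 * Illustrative user-space use (a sketch; attr fields per the UAPI header):
 *
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	attr.link_update.old_prog_fd = old_fd;
 *
 * With BPF_F_REPLACE the update succeeds only while the link is still
 * attached to old_prog_fd's program; without it, old_prog_fd must be 0.
 */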
3943 static int link_update(union bpf_attr *attr)
3944 {
3945         struct bpf_prog *old_prog = NULL, *new_prog;
3946         struct bpf_link *link;
3947         u32 flags;
3948         int ret;
3949
3950         if (CHECK_ATTR(BPF_LINK_UPDATE))
3951                 return -EINVAL;
3952
3953         flags = attr->link_update.flags;
3954         if (flags & ~BPF_F_REPLACE)
3955                 return -EINVAL;
3956
3957         link = bpf_link_get_from_fd(attr->link_update.link_fd);
3958         if (IS_ERR(link))
3959                 return PTR_ERR(link);
3960
3961         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
3962         if (IS_ERR(new_prog)) {
3963                 ret = PTR_ERR(new_prog);
3964                 goto out_put_link;
3965         }
3966
3967         if (flags & BPF_F_REPLACE) {
3968                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
3969                 if (IS_ERR(old_prog)) {
3970                         ret = PTR_ERR(old_prog);
3971                         old_prog = NULL;
3972                         goto out_put_progs;
3973                 }
3974         } else if (attr->link_update.old_prog_fd) {
3975                 ret = -EINVAL;
3976                 goto out_put_progs;
3977         }
3978
3979         if (link->ops->update_prog)
3980                 ret = link->ops->update_prog(link, new_prog, old_prog);
3981         else
3982                 ret = -EINVAL;
3983
3984 out_put_progs:
3985         if (old_prog)
3986                 bpf_prog_put(old_prog);
3987         if (ret)
3988                 bpf_prog_put(new_prog);
3989 out_put_link:
3990         bpf_link_put(link);
3991         return ret;
3992 }
3993
3994 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
3995
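/* BPF_LINK_DETACH: force-detach a link from its hook while leaving the
 * link fd itself alive; -EOPNOTSUPP if the link type has no detach op.
 */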
3996 static int link_detach(union bpf_attr *attr)
3997 {
3998         struct bpf_link *link;
3999         int ret;
4000
4001         if (CHECK_ATTR(BPF_LINK_DETACH))
4002                 return -EINVAL;
4003
4004         link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4005         if (IS_ERR(link))
4006                 return PTR_ERR(link);
4007
4008         if (link->ops->detach)
4009                 ret = link->ops->detach(link);
4010         else
4011                 ret = -EOPNOTSUPP;
4012
4013         bpf_link_put(link);
4014         return ret;
4015 }
4016
4017 static int bpf_link_inc_not_zero(struct bpf_link *link)
4018 {
4019         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
4020 }
4021
4022 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4023
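/* BPF_LINK_GET_FD_BY_ID: turn a global link ID into an fd, taking a new
 * reference on the link.  Restricted to CAP_SYS_ADMIN, like the prog and
 * map equivalents; a link whose ID is not yet settled reports -EAGAIN.
 */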
4024 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4025 {
4026         struct bpf_link *link;
4027         u32 id = attr->link_id;
4028         int fd, err;
4029
4030         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4031                 return -EINVAL;
4032
4033         if (!capable(CAP_SYS_ADMIN))
4034                 return -EPERM;
4035
4036         spin_lock_bh(&link_idr_lock);
4037         link = idr_find(&link_idr, id);
4038         /* before link is "settled", ID is 0, pretend it doesn't exist yet */
4039         if (link) {
4040                 if (link->id)
4041                         err = bpf_link_inc_not_zero(link);
4042                 else
4043                         err = -EAGAIN;
4044         } else {
4045                 err = -ENOENT;
4046         }
4047         spin_unlock_bh(&link_idr_lock);
4048
4049         if (err)
4050                 return err;
4051
4052         fd = bpf_link_new_fd(link);
4053         if (fd < 0)
4054                 bpf_link_put(link);
4055
4056         return fd;
4057 }
4058
4059 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4060
4061 static int bpf_stats_release(struct inode *inode, struct file *file)
4062 {
4063         mutex_lock(&bpf_stats_enabled_mutex);
4064         static_key_slow_dec(&bpf_stats_enabled_key.key);
4065         mutex_unlock(&bpf_stats_enabled_mutex);
4066         return 0;
4067 }
4068
4069 static const struct file_operations bpf_stats_fops = {
4070         .release = bpf_stats_release,
4071 };
4072
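/* Enable run_time_ns/run_cnt accounting for all programs and return an
 * fd whose lifetime controls it: bpf_stats_release() decrements the
 * static key again, so stats stay enabled only while at least one such
 * fd is open.
 */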
4073 static int bpf_enable_runtime_stats(void)
4074 {
4075         int fd;
4076
4077         mutex_lock(&bpf_stats_enabled_mutex);
4078
4079         /* Cap well below INT_MAX so the static key count cannot overflow */
4080         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4081                 mutex_unlock(&bpf_stats_enabled_mutex);
4082                 return -EBUSY;
4083         }
4084
4085         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4086         if (fd >= 0)
4087                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4088
4089         mutex_unlock(&bpf_stats_enabled_mutex);
4090         return fd;
4091 }
4092
4093 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4094
4095 static int bpf_enable_stats(union bpf_attr *attr)
4096 {
4098         if (CHECK_ATTR(BPF_ENABLE_STATS))
4099                 return -EINVAL;
4100
4101         if (!capable(CAP_SYS_ADMIN))
4102                 return -EPERM;
4103
4104         switch (attr->enable_stats.type) {
4105         case BPF_STATS_RUN_TIME:
4106                 return bpf_enable_runtime_stats();
4107         default:
4108                 break;
4109         }
4110         return -EINVAL;
4111 }
4112
4113 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4114
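/* BPF_ITER_CREATE: given the fd of an attached bpf_iter link, create a
 * new fd that user space can read() to run the iterator program over
 * the kernel objects it targets.
 */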
4115 static int bpf_iter_create(union bpf_attr *attr)
4116 {
4117         struct bpf_link *link;
4118         int err;
4119
4120         if (CHECK_ATTR(BPF_ITER_CREATE))
4121                 return -EINVAL;
4122
4123         if (attr->iter_create.flags)
4124                 return -EINVAL;
4125
4126         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4127         if (IS_ERR(link))
4128                 return PTR_ERR(link);
4129
4130         err = bpf_iter_new_fd(link);
4131         bpf_link_put(link);
4132
4133         return err;
4134 }
4135
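/* Main bpf() syscall entry point.  Illustrative user-space call (a
 * sketch; 'prog_fd' is assumed to be a valid program fd):
 *
 *	union bpf_attr attr = {};
 *	struct bpf_prog_info info = {};
 *
 *	attr.info.bpf_fd = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info = (__u64)(unsigned long)&info;
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * Any bytes of 'attr' past the size the kernel knows about must be zero
 * (see bpf_check_uarg_tail_zero()).
 */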
4136 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4137 {
4138         union bpf_attr attr;
4139         int err;
4140
4141         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4142                 return -EPERM;
4143
4144         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4145         if (err)
4146                 return err;
4147         size = min_t(u32, size, sizeof(attr));
4148
4149         /* copy attributes from user space, may be less than sizeof(bpf_attr) */
4150         memset(&attr, 0, sizeof(attr));
4151         if (copy_from_user(&attr, uattr, size) != 0)
4152                 return -EFAULT;
4153
4154         err = security_bpf(cmd, &attr, size);
4155         if (err < 0)
4156                 return err;
4157
4158         switch (cmd) {
4159         case BPF_MAP_CREATE:
4160                 err = map_create(&attr);
4161                 break;
4162         case BPF_MAP_LOOKUP_ELEM:
4163                 err = map_lookup_elem(&attr);
4164                 break;
4165         case BPF_MAP_UPDATE_ELEM:
4166                 err = map_update_elem(&attr);
4167                 break;
4168         case BPF_MAP_DELETE_ELEM:
4169                 err = map_delete_elem(&attr);
4170                 break;
4171         case BPF_MAP_GET_NEXT_KEY:
4172                 err = map_get_next_key(&attr);
4173                 break;
4174         case BPF_MAP_FREEZE:
4175                 err = map_freeze(&attr);
4176                 break;
4177         case BPF_PROG_LOAD:
4178                 err = bpf_prog_load(&attr, uattr);
4179                 break;
4180         case BPF_OBJ_PIN:
4181                 err = bpf_obj_pin(&attr);
4182                 break;
4183         case BPF_OBJ_GET:
4184                 err = bpf_obj_get(&attr);
4185                 break;
4186         case BPF_PROG_ATTACH:
4187                 err = bpf_prog_attach(&attr);
4188                 break;
4189         case BPF_PROG_DETACH:
4190                 err = bpf_prog_detach(&attr);
4191                 break;
4192         case BPF_PROG_QUERY:
4193                 err = bpf_prog_query(&attr, uattr);
4194                 break;
4195         case BPF_PROG_TEST_RUN:
4196                 err = bpf_prog_test_run(&attr, uattr);
4197                 break;
4198         case BPF_PROG_GET_NEXT_ID:
4199                 err = bpf_obj_get_next_id(&attr, uattr,
4200                                           &prog_idr, &prog_idr_lock);
4201                 break;
4202         case BPF_MAP_GET_NEXT_ID:
4203                 err = bpf_obj_get_next_id(&attr, uattr,
4204                                           &map_idr, &map_idr_lock);
4205                 break;
4206         case BPF_BTF_GET_NEXT_ID:
4207                 err = bpf_obj_get_next_id(&attr, uattr,
4208                                           &btf_idr, &btf_idr_lock);
4209                 break;
4210         case BPF_PROG_GET_FD_BY_ID:
4211                 err = bpf_prog_get_fd_by_id(&attr);
4212                 break;
4213         case BPF_MAP_GET_FD_BY_ID:
4214                 err = bpf_map_get_fd_by_id(&attr);
4215                 break;
4216         case BPF_OBJ_GET_INFO_BY_FD:
4217                 err = bpf_obj_get_info_by_fd(&attr, uattr);
4218                 break;
4219         case BPF_RAW_TRACEPOINT_OPEN:
4220                 err = bpf_raw_tracepoint_open(&attr);
4221                 break;
4222         case BPF_BTF_LOAD:
4223                 err = bpf_btf_load(&attr);
4224                 break;
4225         case BPF_BTF_GET_FD_BY_ID:
4226                 err = bpf_btf_get_fd_by_id(&attr);
4227                 break;
4228         case BPF_TASK_FD_QUERY:
4229                 err = bpf_task_fd_query(&attr, uattr);
4230                 break;
4231         case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4232                 err = map_lookup_and_delete_elem(&attr);
4233                 break;
4234         case BPF_MAP_LOOKUP_BATCH:
4235                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4236                 break;
4237         case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4238                 err = bpf_map_do_batch(&attr, uattr,
4239                                        BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4240                 break;
4241         case BPF_MAP_UPDATE_BATCH:
4242                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4243                 break;
4244         case BPF_MAP_DELETE_BATCH:
4245                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4246                 break;
4247         case BPF_LINK_CREATE:
4248                 err = link_create(&attr);
4249                 break;
4250         case BPF_LINK_UPDATE:
4251                 err = link_update(&attr);
4252                 break;
4253         case BPF_LINK_GET_FD_BY_ID:
4254                 err = bpf_link_get_fd_by_id(&attr);
4255                 break;
4256         case BPF_LINK_GET_NEXT_ID:
4257                 err = bpf_obj_get_next_id(&attr, uattr,
4258                                           &link_idr, &link_idr_lock);
4259                 break;
4260         case BPF_ENABLE_STATS:
4261                 err = bpf_enable_stats(&attr);
4262                 break;
4263         case BPF_ITER_CREATE:
4264                 err = bpf_iter_create(&attr);
4265                 break;
4266         case BPF_LINK_DETACH:
4267                 err = link_detach(&attr);
4268                 break;
4269         default:
4270                 err = -EINVAL;
4271                 break;
4272         }
4273
4274         return err;
4275 }