// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
                        IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
        [_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function
 * is only meant as future-proofing against unknown trailing bits.
 */
int bpf_check_uarg_tail_zero(void __user *uaddr,
                             size_t expected_size,
                             size_t actual_size)
{
        unsigned char __user *addr = uaddr + expected_size;
        int res;

        if (unlikely(actual_size > PAGE_SIZE))  /* silly large */
                return -E2BIG;

        if (actual_size <= expected_size)
                return 0;

        res = check_zeroed_user(addr, actual_size - expected_size);
        if (res < 0)
                return res;
        return res ? 0 : -E2BIG;
}
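
/*
 * Example usage (a minimal sketch; "some_cmd" is hypothetical, but the
 * call pattern mirrors how __sys_bpf() consumes this helper):
 *
 *	static int some_cmd(union bpf_attr __user *uattr, unsigned int size)
 *	{
 *		union bpf_attr attr = {};
 *		int err;
 *
 *		// Reject garbage in fields this kernel does not know about.
 *		err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *		if (err)
 *			return err;
 *		size = min_t(u32, size, sizeof(attr));
 *
 *		// Only now is it safe to copy the (possibly smaller) struct.
 *		if (copy_from_user(&attr, uattr, size))
 *			return -EFAULT;
 *		...
 *	}
 */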

const struct bpf_map_ops bpf_map_offload_ops = {
        .map_alloc = bpf_map_offload_map_alloc,
        .map_free = bpf_map_offload_map_free,
        .map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
        const struct bpf_map_ops *ops;
        u32 type = attr->map_type;
        struct bpf_map *map;
        int err;

        if (type >= ARRAY_SIZE(bpf_map_types))
                return ERR_PTR(-EINVAL);
        type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
        ops = bpf_map_types[type];
        if (!ops)
                return ERR_PTR(-EINVAL);

        if (ops->map_alloc_check) {
                err = ops->map_alloc_check(attr);
                if (err)
                        return ERR_PTR(err);
        }
        if (attr->map_ifindex)
                ops = &bpf_map_offload_ops;
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return map;
        map->ops = ops;
        map->map_type = type;
        return map;
}

static u32 bpf_map_value_size(struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
            map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return round_up(map->value_size, 8) * num_possible_cpus();
        else if (IS_FD_MAP(map))
                return sizeof(u32);
        else
                return map->value_size;
}
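
/*
 * Example: a user-space lookup on a per-CPU map must size its buffer the
 * same way (a minimal sketch; assumes libbpf's bpf_map_lookup_elem()
 * wrapper and a hypothetical MAX_CPUS >= libbpf_num_possible_cpus()):
 *
 *	__u64 values[MAX_CPUS];		// one 8-byte-aligned slot per CPU
 *	__u32 key = 0;
 *
 *	// The kernel copies round_up(value_size, 8) * num_possible_cpus()
 *	// bytes, i.e. one slot per *possible* CPU, not per online CPU.
 *	err = bpf_map_lookup_elem(percpu_array_fd, &key, values);
 */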

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
        /* Wait for any running BPF programs to complete so that
         * userspace, when we return to it, knows that all programs
         * that could be running use the new map value.
         */
        if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
            map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
                synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
                                void *value, __u64 flags)
{
        int err;

        /* Need to create a kthread, thus must support schedule */
        if (bpf_map_is_dev_bound(map)) {
                return bpf_map_offload_update_elem(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
                   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
                   map->map_type == BPF_MAP_TYPE_SOCKMAP ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                return map->ops->map_update_elem(map, key, value, flags);
        } else if (IS_FD_PROG_ARRAY(map)) {
                return bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                    flags);
        }

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_update(map, key, value, flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_update(map, key, value,
                                                       flags);
        } else if (IS_FD_ARRAY(map)) {
                rcu_read_lock();
                err = bpf_fd_array_map_update_elem(map, f.file, key, value,
                                                   flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
                rcu_read_lock();
                err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
                                                  flags);
                rcu_read_unlock();
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                /* rcu_read_lock() is not needed */
                err = bpf_fd_reuseport_array_update_elem(map, key, value,
                                                         flags);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_push_elem(map, value, flags);
        } else {
                rcu_read_lock();
                err = map->ops->map_update_elem(map, key, value, flags);
                rcu_read_unlock();
        }
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
                              __u64 flags)
{
        void *ptr;
        int err;

        if (bpf_map_is_dev_bound(map))
                return bpf_map_offload_lookup_elem(map, key, value);

        bpf_disable_instrumentation();
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
                err = bpf_percpu_cgroup_storage_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
        } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
                err = bpf_fd_array_map_lookup_elem(map, key, value);
        } else if (IS_FD_HASH(map)) {
                err = bpf_fd_htab_map_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
                err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
                   map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_peek_elem(map, value);
        } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* struct_ops map requires directly updating "value" */
                err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
        } else {
                rcu_read_lock();
                if (map->ops->map_lookup_elem_sys_only)
                        ptr = map->ops->map_lookup_elem_sys_only(map, key);
                else
                        ptr = map->ops->map_lookup_elem(map, key);
                if (IS_ERR(ptr)) {
                        err = PTR_ERR(ptr);
                } else if (!ptr) {
                        err = -ENOENT;
                } else {
                        err = 0;
                        if (flags & BPF_F_LOCK)
                                /* lock 'ptr' and copy everything but lock */
                                copy_map_value_locked(map, value, ptr, true);
                        else
                                copy_map_value(map, value, ptr);
                        /* mask lock, since value wasn't zero inited */
                        check_and_init_map_lock(map, value);
                }
                rcu_read_unlock();
        }

        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);

        return err;
}

static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
        /* We really just want to fail instead of triggering the OOM killer
         * under memory pressure, therefore we pass __GFP_NORETRY to
         * kmalloc, which is used for lower-order allocation requests.
         *
         * It has been observed that higher-order allocation requests done by
         * vmalloc with __GFP_NORETRY set might fail due to not trying to
         * reclaim memory from the page cache, thus we set
         * __GFP_RETRY_MAYFAIL to avoid such situations.
         */

        const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
        unsigned int flags = 0;
        unsigned long align = 1;
        void *area;

        if (size >= SIZE_MAX)
                return NULL;

        /* kmalloc()'ed memory can't be mmap()'ed */
        if (mmapable) {
                BUG_ON(!PAGE_ALIGNED(size));
                align = SHMLBA;
                flags = VM_USERMAP;
        } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
                                    numa_node);
                if (area != NULL)
                        return area;
        }

        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                        gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
                        flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
        return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
        kvfree(area);
}
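
/*
 * Example: how a map implementation typically pairs these helpers (a
 * minimal sketch; "struct my_map" and its fields are hypothetical, the
 * bpf_map_area_alloc()/bpf_map_area_free() pattern mirrors real users
 * such as the array map):
 *
 *	struct my_map *m;
 *
 *	// Falls back from kmalloc to vmalloc transparently, so a large
 *	// max_entries needs no special casing here.
 *	m = bpf_map_area_alloc(sizeof(*m) +
 *			       (u64) attr->max_entries * elem_size,
 *			       bpf_map_attr_numa_node(attr));
 *	if (!m)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(m);	// kvfree() handles either backing store
 */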

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
        /* Some map creation flags are not tied to the map object but
         * rather to the map fd instead, so they have no meaning upon
         * map object inspection since multiple file descriptors with
         * different (access) properties can exist here. Thus, given
         * this has zero meaning for the map itself, let's clear these
         * here.
         */
        return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
        map->map_type = attr->map_type;
        map->key_size = attr->key_size;
        map->value_size = attr->value_size;
        map->max_entries = attr->max_entries;
        map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
        map->numa_node = bpf_map_attr_numa_node(attr);
}

static int bpf_charge_memlock(struct user_struct *user, u32 pages)
{
        unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

        if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
                atomic_long_sub(pages, &user->locked_vm);
                return -EPERM;
        }
        return 0;
}

static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
{
        if (user)
                atomic_long_sub(pages, &user->locked_vm);
}

int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
{
        u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
        struct user_struct *user;
        int ret;

        if (size >= U32_MAX - PAGE_SIZE)
                return -E2BIG;

        user = get_current_user();
        ret = bpf_charge_memlock(user, pages);
        if (ret) {
                free_uid(user);
                return ret;
        }

        mem->pages = pages;
        mem->user = user;

        return 0;
}

void bpf_map_charge_finish(struct bpf_map_memory *mem)
{
        bpf_uncharge_memlock(mem->user, mem->pages);
        free_uid(mem->user);
}

void bpf_map_charge_move(struct bpf_map_memory *dst,
                         struct bpf_map_memory *src)
{
        *dst = *src;

        /* Make sure src will not be used for the redundant uncharging. */
        memset(src, 0, sizeof(struct bpf_map_memory));
}
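
/*
 * Example: the usual charge lifecycle in a map_alloc implementation (a
 * minimal sketch; "struct my_map" and "total_bytes" are hypothetical, the
 * charge-then-move pattern follows the helpers above):
 *
 *	struct bpf_map_memory mem;
 *	struct my_map *m;
 *	int err;
 *
 *	// Charge the current user's RLIMIT_MEMLOCK before allocating.
 *	err = bpf_map_charge_init(&mem, total_bytes);
 *	if (err)
 *		return ERR_PTR(err);
 *
 *	m = bpf_map_area_alloc(total_bytes, numa_node);
 *	if (!m) {
 *		bpf_map_charge_finish(&mem);	// undo the charge on failure
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	// On success, the map object takes ownership of the charge.
 *	bpf_map_charge_move(&m->map.memory, &mem);
 */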

int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
{
        int ret;

        ret = bpf_charge_memlock(map->memory.user, pages);
        if (ret)
                return ret;
        map->memory.pages += pages;
        return ret;
}

void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
        bpf_uncharge_memlock(map->memory.user, pages);
        map->memory.pages -= pages;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&map_idr_lock);
        id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
        if (id > 0)
                map->id = id;
        spin_unlock_bh(&map_idr_lock);
        idr_preload_end();

        if (WARN_ON_ONCE(!id))
                return -ENOSPC;

        return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
        unsigned long flags;

        /* Offloaded maps are removed from the IDR store when their device
         * disappears - even if someone holds an fd to them they are unusable,
         * the memory is gone, all ops will fail; they are simply waiting for
         * refcnt to drop to be freed.
         */
        if (!map->id)
                return;

        if (do_idr_lock)
                spin_lock_irqsave(&map_idr_lock, flags);
        else
                __acquire(&map_idr_lock);

        idr_remove(&map_idr, map->id);
        map->id = 0;

        if (do_idr_lock)
                spin_unlock_irqrestore(&map_idr_lock, flags);
        else
                __release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
        struct bpf_map *map = container_of(work, struct bpf_map, work);
        struct bpf_map_memory mem;

        bpf_map_charge_move(&mem, &map->memory);
        security_bpf_map_free(map);
        /* implementation dependent freeing */
        map->ops->map_free(map);
        bpf_map_charge_finish(&mem);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
        if (atomic64_dec_and_test(&map->usercnt)) {
                if (map->ops->map_release_uref)
                        map->ops->map_release_uref(map);
        }
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
        if (atomic64_dec_and_test(&map->refcnt)) {
                /* bpf_map_free_id() must be called first */
                bpf_map_free_id(map, do_idr_lock);
                btf_put(map->btf);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
}

void bpf_map_put(struct bpf_map *map)
{
        __bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
        bpf_map_put_uref(map);
        bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_release)
                map->ops->map_release(map, filp);

        bpf_map_put_with_uref(map);
        return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
        fmode_t mode = f.file->f_mode;

        /* Our per-fd file permissions may be overridden by the map's
         * global permissions on the syscall side.
         */
        if (READ_ONCE(map->frozen))
                mode &= ~FMODE_CAN_WRITE;
        return mode;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 type = 0, jited = 0;

        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                type  = array->aux->type;
                jited = array->aux->jited;
        }

        seq_printf(m,
                   "map_type:\t%u\n"
                   "key_size:\t%u\n"
                   "value_size:\t%u\n"
                   "max_entries:\t%u\n"
                   "map_flags:\t%#x\n"
                   "memlock:\t%llu\n"
                   "map_id:\t%u\n"
                   "frozen:\t%u\n",
                   map->map_type,
                   map->key_size,
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
                   map->memory.pages * 1ULL << PAGE_SHIFT,
                   map->id,
                   READ_ONCE(map->frozen));
        if (type) {
                seq_printf(m, "owner_prog_type:\t%u\n", type);
                seq_printf(m, "owner_jited:\t%u\n", jited);
        }
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
                              loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_READ.
         */
        return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
                               size_t siz, loff_t *ppos)
{
        /* We need this handler such that alloc_file() enables
         * f_mode with FMODE_CAN_WRITE.
         */
        return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt++;
                mutex_unlock(&map->freeze_mutex);
        }
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
        struct bpf_map *map = vma->vm_file->private_data;

        if (vma->vm_flags & VM_MAYWRITE) {
                mutex_lock(&map->freeze_mutex);
                map->writecnt--;
                mutex_unlock(&map->freeze_mutex);
        }
}

static const struct vm_operations_struct bpf_map_default_vmops = {
        .open           = bpf_map_mmap_open,
        .close          = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct bpf_map *map = filp->private_data;
        int err;

        if (!map->ops->map_mmap || map_value_has_spin_lock(map))
                return -ENOTSUPP;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        mutex_lock(&map->freeze_mutex);

        if (vma->vm_flags & VM_WRITE) {
                if (map->frozen) {
                        err = -EPERM;
                        goto out;
                }
                /* map is meant to be read-only, so do not allow mapping as
                 * writable, because it's possible to leak a writable page
                 * reference, allowing user-space to still modify it after
                 * freezing, while the verifier assumes the contents do not
                 * change
                 */
                if (map->map_flags & BPF_F_RDONLY_PROG) {
                        err = -EACCES;
                        goto out;
                }
        }

        /* set default open/close callbacks */
        vma->vm_ops = &bpf_map_default_vmops;
        vma->vm_private_data = map;
        vma->vm_flags &= ~VM_MAYEXEC;
        if (!(vma->vm_flags & VM_WRITE))
                /* disallow re-mapping with PROT_WRITE */
                vma->vm_flags &= ~VM_MAYWRITE;

        err = map->ops->map_mmap(map, vma);
        if (err)
                goto out;

        if (vma->vm_flags & VM_MAYWRITE)
                map->writecnt++;
out:
        mutex_unlock(&map->freeze_mutex);
        return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
        struct bpf_map *map = filp->private_data;

        if (map->ops->map_poll)
                return map->ops->map_poll(map, filp, pts);

        return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = bpf_map_show_fdinfo,
#endif
        .release        = bpf_map_release,
        .read           = bpf_dummy_read,
        .write          = bpf_dummy_write,
        .mmap           = bpf_map_mmap,
        .poll           = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
        int ret;

        ret = security_bpf_map(map, OPEN_FMODE(flags));
        if (ret < 0)
                return ret;

        return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
                                flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
        if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
                return -EINVAL;
        if (flags & BPF_F_RDONLY)
                return O_RDONLY;
        if (flags & BPF_F_WRONLY)
                return O_WRONLY;
        return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
        memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
                   sizeof(attr->CMD##_LAST_FIELD), 0, \
                   sizeof(*attr) - \
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
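
/*
 * Example: each command pairs a *_LAST_FIELD define with CHECK_ATTR() (a
 * minimal sketch mirroring the real commands below; BPF_MAP_LOOKUP_ELEM's
 * last used field really is "flags"):
 *
 *	#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
 *
 *	static int map_lookup_elem(union bpf_attr *attr)
 *	{
 *		// Rejects the call if any byte after attr->flags is
 *		// non-zero, so new fields can be added to union bpf_attr
 *		// later without silently changing old commands.
 *		if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 *			return -EINVAL;
 *		...
 *	}
 */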

/* dst and src must have at least "size" bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
        const char *end = src + size;
        const char *orig_src = src;

        memset(dst, 0, size);
        /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
                if (!isalnum(*src) &&
                    *src != '_' && *src != '.')
                        return -EINVAL;
                *dst++ = *src++;
        }

        /* No '\0' found within "size" bytes */
        if (src == end)
                return -EINVAL;

        return src - orig_src;
}
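
/*
 * Example behaviour of the check above: "my_map.v2" is copied and its
 * length returned; "bad-name" fails with -EINVAL since '-' is neither
 * alphanumeric, '_' nor '.'; and a name that occupies all of "size"
 * bytes without a terminating '\0' also fails with -EINVAL.
 */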

int map_check_no_btf(const struct bpf_map *map,
                     const struct btf *btf,
                     const struct btf_type *key_type,
                     const struct btf_type *value_type)
{
        return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
                         u32 btf_key_id, u32 btf_value_id)
{
        const struct btf_type *key_type, *value_type;
        u32 key_size, value_size;
        int ret = 0;

        /* Some maps allow key to be unspecified. */
        if (btf_key_id) {
                key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
                if (!key_type || key_size != map->key_size)
                        return -EINVAL;
        } else {
                key_type = btf_type_by_id(btf, 0);
                if (!map->ops->map_check_btf)
                        return -EINVAL;
        }

        value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
        if (!value_type || value_size != map->value_size)
                return -EINVAL;

        map->spin_lock_off = btf_find_spin_lock(btf, value_type);

        if (map_value_has_spin_lock(map)) {
                if (map->map_flags & BPF_F_RDONLY_PROG)
                        return -EACCES;
                if (map->map_type != BPF_MAP_TYPE_HASH &&
                    map->map_type != BPF_MAP_TYPE_ARRAY &&
                    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
                    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
                        return -ENOTSUPP;
                if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
                    map->value_size) {
                        WARN_ONCE(1,
                                  "verifier bug spin_lock_off %d value_size %d\n",
                                  map->spin_lock_off, map->value_size);
                        return -EFAULT;
                }
        }

        if (map->ops->map_check_btf)
                ret = map->ops->map_check_btf(map, btf, key_type, value_type);

        return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_map_memory mem;
        struct bpf_map *map;
        int f_flags;
        int err;

        err = CHECK_ATTR(BPF_MAP_CREATE);
        if (err)
                return -EINVAL;

        if (attr->btf_vmlinux_value_type_id) {
                if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
                    attr->btf_key_type_id || attr->btf_value_type_id)
                        return -EINVAL;
        } else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
                return -EINVAL;
        }

        f_flags = bpf_get_file_flag(attr->map_flags);
        if (f_flags < 0)
                return f_flags;

        if (numa_node != NUMA_NO_NODE &&
            ((unsigned int)numa_node >= nr_node_ids ||
             !node_online(numa_node)))
                return -EINVAL;

        /* find map type and init map: hashtable vs rbtree vs bloom vs ... */
        map = find_and_alloc_map(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);

        err = bpf_obj_name_cpy(map->name, attr->map_name,
                               sizeof(attr->map_name));
        if (err < 0)
                goto free_map;

        atomic64_set(&map->refcnt, 1);
        atomic64_set(&map->usercnt, 1);
        mutex_init(&map->freeze_mutex);

        map->spin_lock_off = -EINVAL;
        if (attr->btf_key_type_id || attr->btf_value_type_id ||
            /* Even if the map's value is a kernel struct,
             * the bpf_prog.o must have BTF to begin with
             * to figure out the corresponding kernel
             * counterpart. Thus, attr->btf_fd has to be
             * valid as well.
             */
            attr->btf_vmlinux_value_type_id) {
                struct btf *btf;

                btf = btf_get_by_fd(attr->btf_fd);
                if (IS_ERR(btf)) {
                        err = PTR_ERR(btf);
                        goto free_map;
                }
                map->btf = btf;

                if (attr->btf_value_type_id) {
                        err = map_check_btf(map, btf, attr->btf_key_type_id,
                                            attr->btf_value_type_id);
                        if (err)
                                goto free_map;
                }

                map->btf_key_type_id = attr->btf_key_type_id;
                map->btf_value_type_id = attr->btf_value_type_id;
                map->btf_vmlinux_value_type_id =
                        attr->btf_vmlinux_value_type_id;
        }

        err = security_bpf_map_alloc(map);
        if (err)
                goto free_map;

        err = bpf_map_alloc_id(map);
        if (err)
                goto free_map_sec;

        err = bpf_map_new_fd(map, f_flags);
        if (err < 0) {
                /* failed to allocate fd.
                 * bpf_map_put_with_uref() is needed because the above
                 * bpf_map_alloc_id() has published the map to userspace
                 * and userspace may have refcnt-ed it through
                 * BPF_MAP_GET_FD_BY_ID.
                 */
                bpf_map_put_with_uref(map);
                return err;
        }

        return err;

free_map_sec:
        security_bpf_map_free(map);
free_map:
        btf_put(map->btf);
        bpf_map_charge_move(&mem, &map->memory);
        map->ops->map_free(map);
        bpf_map_charge_finish(&mem);
        return err;
}
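
/*
 * Example: creating a map from user space lands here via the bpf(2)
 * syscall (a minimal sketch; raw syscall shown instead of libbpf, and
 * only a handful of attr fields are set):
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));	// unused fields must be zero
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(int);
 *	attr.value_size  = sizeof(long);
 *	attr.max_entries = 64;
 *	strncpy(attr.map_name, "example_map", sizeof(attr.map_name) - 1);
 *
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// On success this is the fd installed by bpf_map_new_fd() above.
 */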

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
        if (!f.file)
                return ERR_PTR(-EBADF);
        if (f.file->f_op != &bpf_map_fops) {
                fdput(f);
                return ERR_PTR(-EINVAL);
        }

        return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
        atomic64_inc(&map->refcnt);
        atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc(map);
        fdput(f);

        return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
        struct fd f = fdget(ufd);
        struct bpf_map *map;

        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return map;

        bpf_map_inc_with_uref(map);
        fdput(f);

        return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
        int refold;

        refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
        if (!refold)
                return ERR_PTR(-ENOENT);
        if (uref)
                atomic64_inc(&map->usercnt);

        return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
        spin_lock_bh(&map_idr_lock);
        map = __bpf_map_inc_not_zero(map, false);
        spin_unlock_bh(&map_idr_lock);

        return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
        if (key_size)
                return memdup_user(ukey, key_size);

        if (ukey)
                return ERR_PTR(-EINVAL);

        return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;

        if (attr->flags & ~BPF_F_LOCK)
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = bpf_map_value_size(map);

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = bpf_map_copy_value(map, key, value, attr->flags);
        if (err)
                goto free_value;

        err = -EFAULT;
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
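
/*
 * Example: the user-space side of this command (a minimal sketch using
 * the raw syscall; key/value pointers travel as u64s inside the attr):
 *
 *	union bpf_attr attr;
 *	int key = 1;
 *	long value;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
 *		// -ENOENT if the key is absent, -EPERM on a write-only fd
 *		perror("BPF_MAP_LOOKUP_ELEM");
 */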
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        if ((attr->flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                err = -EINVAL;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
            map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
        else
                value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        err = -EFAULT;
        if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;

        err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;
        void *key;
        int err;

        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_delete_elem(map, key);
                goto out;
        } else if (IS_FD_PROG_ARRAY(map) ||
                   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                /* These maps require sleepable context */
                err = map->ops->map_delete_elem(map, key);
                goto out;
        }

        bpf_disable_instrumentation();
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
        bpf_enable_instrumentation();
        maybe_wait_bpf_programs(map);
out:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *unext_key = u64_to_user_ptr(attr->next_key);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *next_key;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
                err = -EPERM;
                goto err_put;
        }

        if (ukey) {
                key = __bpf_copy_key(ukey, map->key_size);
                if (IS_ERR(key)) {
                        err = PTR_ERR(key);
                        goto err_put;
                }
        } else {
                key = NULL;
        }

        err = -ENOMEM;
        next_key = kmalloc(map->key_size, GFP_USER);
        if (!next_key)
                goto free_key;

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_get_next_key(map, key, next_key);
                goto out;
        }

        rcu_read_lock();
        err = map->ops->map_get_next_key(map, key, next_key);
        rcu_read_unlock();
out:
        if (err)
                goto free_next_key;

        err = -EFAULT;
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;

        err = 0;

free_next_key:
        kfree(next_key);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}
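
/*
 * Example: user space iterates a map by chaining this command (a minimal
 * sketch; passing key = NULL, i.e. attr.key = 0, yields the first key):
 *
 *	union bpf_attr attr;
 *	int key, next_key;
 *	void *prev = NULL;		// NULL means "give me the first key"
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd   = map_fd;
 *		attr.key      = (__u64)(unsigned long)prev;
 *		attr.next_key = (__u64)(unsigned long)&next_key;
 *		if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *			    sizeof(attr)))
 *			break;		// -ENOENT once iteration is done
 *		key = next_key;
 *		prev = &key;
 *	}
 */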

int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 cp, max_count;
        int err = 0;
        void *key;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                                   map->key_size))
                        break;

                if (bpf_map_is_dev_bound(map)) {
                        err = bpf_map_offload_delete_elem(map, key);
                        break;
                }

                bpf_disable_instrumentation();
                rcu_read_lock();
                err = map->ops->map_delete_elem(map, key);
                rcu_read_unlock();
                bpf_enable_instrumentation();
                maybe_wait_bpf_programs(map);
                if (err)
                        break;
        }
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        kfree(key);
        return err;
}

int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 value_size, cp, max_count;
        int ufd = attr->map_fd;
        void *key, *value;
        struct fd f;
        int err = 0;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!key)
                return -ENOMEM;

        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value) {
                kfree(key);
                return -ENOMEM;
        }

        /* Take the map fd reference only once all early returns are done,
         * so that it cannot be leaked; it is dropped below before return.
         */
        f = fdget(ufd);
        for (cp = 0; cp < max_count; cp++) {
                err = -EFAULT;
                if (copy_from_user(key, keys + cp * map->key_size,
                                   map->key_size) ||
                    copy_from_user(value, values + cp * value_size,
                                   value_size))
                        break;

                err = bpf_map_update_value(map, f, key, value,
                                           attr->batch.elem_flags);

                if (err)
                        break;
        }

        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        fdput(f);
        kfree(value);
        kfree(key);
        return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
        void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        void *buf, *buf_prevkey, *prev_key, *key, *value;
        int err, retry = MAP_LOOKUP_RETRIES;
        u32 value_size, cp, max_count;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map))
                return -EINVAL;

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        if (put_user(0, &uattr->batch.count))
                return -EFAULT;

        buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
        if (!buf_prevkey)
                return -ENOMEM;

        buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
        if (!buf) {
                kfree(buf_prevkey);
                return -ENOMEM;
        }

        err = -EFAULT;
        prev_key = NULL;
        if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
                goto free_buf;
        key = buf;
        value = key + map->key_size;
        if (ubatch)
                prev_key = buf_prevkey;

        for (cp = 0; cp < max_count;) {
                rcu_read_lock();
                err = map->ops->map_get_next_key(map, prev_key, key);
                rcu_read_unlock();
                if (err)
                        break;
                err = bpf_map_copy_value(map, key, value,
                                         attr->batch.elem_flags);

                if (err == -ENOENT) {
                        if (retry) {
                                retry--;
                                continue;
                        }
                        err = -EINTR;
                        break;
                }

                if (err)
                        goto free_buf;

                if (copy_to_user(keys + cp * map->key_size, key,
                                 map->key_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }
                if (copy_to_user(values + cp * value_size, value, value_size)) {
                        err = -EFAULT;
                        goto free_buf;
                }

                if (!prev_key)
                        prev_key = buf_prevkey;

                swap(prev_key, key);
                retry = MAP_LOOKUP_RETRIES;
                cp++;
        }

        if (err == -EFAULT)
                goto free_buf;

        if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
             (cp && copy_to_user(uobatch, prev_key, map->key_size))))
                err = -EFAULT;

free_buf:
        kfree(buf_prevkey);
        kfree(buf);
        return err;
}
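
/*
 * Example: consuming this from user space (a minimal sketch; assumes
 * libbpf's bpf_map_lookup_batch() wrapper and an int-keyed map, so the
 * opaque in_batch/out_batch cursors handled above are key-sized):
 *
 *	int in, out, keys[64];
 *	long vals[64];
 *	__u32 count;
 *	void *in_p = NULL;		// NULL cursor starts the walk
 *
 *	for (;;) {
 *		count = 64;
 *		err = bpf_map_lookup_batch(map_fd, in_p, &out, keys, vals,
 *					   &count, NULL);
 *		// process "count" key/value pairs here...
 *		if (err)	// -ENOENT once the whole map was walked
 *			break;
 *		in = out;
 *		in_p = &in;
 *	}
 */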

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
        void __user *ukey = u64_to_user_ptr(attr->key);
        void __user *uvalue = u64_to_user_ptr(attr->value);
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
        u32 value_size;
        struct fd f;
        int err;

        if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
        if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
            !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
                err = -EPERM;
                goto err_put;
        }

        key = __bpf_copy_key(ukey, map->key_size);
        if (IS_ERR(key)) {
                err = PTR_ERR(key);
                goto err_put;
        }

        value_size = map->value_size;

        err = -ENOMEM;
        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;

        if (map->map_type == BPF_MAP_TYPE_QUEUE ||
            map->map_type == BPF_MAP_TYPE_STACK) {
                err = map->ops->map_pop_elem(map, value);
        } else {
                err = -ENOTSUPP;
        }

        if (err)
                goto free_value;

        if (copy_to_user(uvalue, value, value_size) != 0) {
                err = -EFAULT;
                goto free_value;
        }

        err = 0;

free_value:
        kfree(value);
free_key:
        kfree(key);
err_put:
        fdput(f);
        return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
        int err = 0, ufd = attr->map_fd;
        struct bpf_map *map;
        struct fd f;

        if (CHECK_ATTR(BPF_MAP_FREEZE))
                return -EINVAL;

        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);

        if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
                fdput(f);
                return -ENOTSUPP;
        }

        mutex_lock(&map->freeze_mutex);

        if (map->writecnt) {
                err = -EBUSY;
                goto err_put;
        }
        if (READ_ONCE(map->frozen)) {
                err = -EBUSY;
                goto err_put;
        }
        if (!bpf_capable()) {
                err = -EPERM;
                goto err_put;
        }

        WRITE_ONCE(map->frozen, true);
err_put:
        mutex_unlock(&map->freeze_mutex);
        fdput(f);
        return err;
}
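
/*
 * Example: freezing makes a map read-only from the syscall side while BPF
 * programs can still access it (a minimal sketch using the raw syscall;
 * any writable mmap of the map must be gone first, or this is -EBUSY):
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;
 *	if (syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr)))
 *		perror("BPF_MAP_FREEZE");
 *	// Subsequent BPF_MAP_UPDATE_ELEM calls on any fd now fail with
 *	// -EPERM, since map_get_sys_perms() masks off FMODE_CAN_WRITE.
 */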

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
        [_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
        const struct bpf_prog_ops *ops;

        if (type >= ARRAY_SIZE(bpf_prog_types))
                return -EINVAL;
        type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
        ops = bpf_prog_types[type];
        if (!ops)
                return -EINVAL;

        if (!bpf_prog_is_dev_bound(prog->aux))
                prog->aux->ops = ops;
        else
                prog->aux->ops = &bpf_offload_prog_ops;
        prog->type = type;
        return 0;
}

enum bpf_audit {
        BPF_AUDIT_LOAD,
        BPF_AUDIT_UNLOAD,
        BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
        [BPF_AUDIT_LOAD]   = "LOAD",
        [BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
        struct audit_context *ctx = NULL;
        struct audit_buffer *ab;

        if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
                return;
        if (audit_enabled == AUDIT_OFF)
                return;
        if (op == BPF_AUDIT_LOAD)
                ctx = audit_context();
        ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
        if (unlikely(!ab))
                return;
        audit_log_format(ab, "prog-id=%u op=%s",
                         prog->aux->id, bpf_audit_str[op]);
        audit_log_end(ab);
}
1625
1626 int __bpf_prog_charge(struct user_struct *user, u32 pages)
1627 {
1628         unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1629         unsigned long user_bufs;
1630
1631         if (user) {
1632                 user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1633                 if (user_bufs > memlock_limit) {
1634                         atomic_long_sub(pages, &user->locked_vm);
1635                         return -EPERM;
1636                 }
1637         }
1638
1639         return 0;
1640 }
1641
1642 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1643 {
1644         if (user)
1645                 atomic_long_sub(pages, &user->locked_vm);
1646 }
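/*
 * Usage sketch (userspace, illustrative only): the charging above is
 * accounted against the loading user's RLIMIT_MEMLOCK, so loaders on this
 * kernel vintage commonly raise that limit up front. The helper name is
 * hypothetical.
 */
#include <sys/resource.h>

static int bump_memlock_rlimit(void)
{
        struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };

        /* Without this, __bpf_prog_charge() may fail with -EPERM */
        return setrlimit(RLIMIT_MEMLOCK, &rl);
}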
1647
1648 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1649 {
1650         struct user_struct *user = get_current_user();
1651         int ret;
1652
1653         ret = __bpf_prog_charge(user, prog->pages);
1654         if (ret) {
1655                 free_uid(user);
1656                 return ret;
1657         }
1658
1659         prog->aux->user = user;
1660         return 0;
1661 }
1662
1663 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1664 {
1665         struct user_struct *user = prog->aux->user;
1666
1667         __bpf_prog_uncharge(user, prog->pages);
1668         free_uid(user);
1669 }
1670
1671 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1672 {
1673         int id;
1674
1675         idr_preload(GFP_KERNEL);
1676         spin_lock_bh(&prog_idr_lock);
1677         id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1678         if (id > 0)
1679                 prog->aux->id = id;
1680         spin_unlock_bh(&prog_idr_lock);
1681         idr_preload_end();
1682
1683         /* id is in [1, INT_MAX) */
1684         if (WARN_ON_ONCE(!id))
1685                 return -ENOSPC;
1686
1687         return id > 0 ? 0 : id;
1688 }
1689
1690 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1691 {
1692         /* cBPF to eBPF migrations are currently not in the idr store.
1693          * Offloaded programs are removed from the store when their device
1694          * disappears - even if someone grabs an fd to them they are unusable,
1695          * simply waiting for refcnt to drop to be freed.
1696          */
1697         if (!prog->aux->id)
1698                 return;
1699
1700         if (do_idr_lock)
1701                 spin_lock_bh(&prog_idr_lock);
1702         else
1703                 __acquire(&prog_idr_lock);
1704
1705         idr_remove(&prog_idr, prog->aux->id);
1706         prog->aux->id = 0;
1707
1708         if (do_idr_lock)
1709                 spin_unlock_bh(&prog_idr_lock);
1710         else
1711                 __release(&prog_idr_lock);
1712 }
1713
1714 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1715 {
1716         struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1717
1718         kvfree(aux->func_info);
1719         kfree(aux->func_info_aux);
1720         bpf_prog_uncharge_memlock(aux->prog);
1721         security_bpf_prog_free(aux);
1722         bpf_prog_free(aux->prog);
1723 }
1724
1725 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1726 {
1727         bpf_prog_kallsyms_del_all(prog);
1728         btf_put(prog->aux->btf);
1729         bpf_prog_free_linfo(prog);
1730
1731         if (deferred)
1732                 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1733         else
1734                 __bpf_prog_put_rcu(&prog->aux->rcu);
1735 }
1736
1737 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1738 {
1739         if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1740                 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1741                 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1742                 /* bpf_prog_free_id() must be called first */
1743                 bpf_prog_free_id(prog, do_idr_lock);
1744                 __bpf_prog_put_noref(prog, true);
1745         }
1746 }
1747
1748 void bpf_prog_put(struct bpf_prog *prog)
1749 {
1750         __bpf_prog_put(prog, true);
1751 }
1752 EXPORT_SYMBOL_GPL(bpf_prog_put);
1753
1754 static int bpf_prog_release(struct inode *inode, struct file *filp)
1755 {
1756         struct bpf_prog *prog = filp->private_data;
1757
1758         bpf_prog_put(prog);
1759         return 0;
1760 }
1761
1762 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1763                                struct bpf_prog_stats *stats)
1764 {
1765         u64 nsecs = 0, cnt = 0;
1766         int cpu;
1767
1768         for_each_possible_cpu(cpu) {
1769                 const struct bpf_prog_stats *st;
1770                 unsigned int start;
1771                 u64 tnsecs, tcnt;
1772
1773                 st = per_cpu_ptr(prog->aux->stats, cpu);
1774                 do {
1775                         start = u64_stats_fetch_begin_irq(&st->syncp);
1776                         tnsecs = st->nsecs;
1777                         tcnt = st->cnt;
1778                 } while (u64_stats_fetch_retry_irq(&st->syncp, start));
1779                 nsecs += tnsecs;
1780                 cnt += tcnt;
1781         }
1782         stats->nsecs = nsecs;
1783         stats->cnt = cnt;
1784 }
1785
1786 #ifdef CONFIG_PROC_FS
1787 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1788 {
1789         const struct bpf_prog *prog = filp->private_data;
1790         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1791         struct bpf_prog_stats stats;
1792
1793         bpf_prog_get_stats(prog, &stats);
1794         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1795         seq_printf(m,
1796                    "prog_type:\t%u\n"
1797                    "prog_jited:\t%u\n"
1798                    "prog_tag:\t%s\n"
1799                    "memlock:\t%llu\n"
1800                    "prog_id:\t%u\n"
1801                    "run_time_ns:\t%llu\n"
1802                    "run_cnt:\t%llu\n",
1803                    prog->type,
1804                    prog->jited,
1805                    prog_tag,
1806                    prog->pages * 1ULL << PAGE_SHIFT,
1807                    prog->aux->id,
1808                    stats.nsecs,
1809                    stats.cnt);
1810 }
1811 #endif
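/*
 * Usage sketch (userspace, illustrative only): the fields emitted by
 * bpf_prog_show_fdinfo() above can be read back through procfs. Note that
 * run_time_ns/run_cnt stay zero unless the kernel.bpf_stats_enabled sysctl
 * is set. The helper name is hypothetical.
 */
#include <stdio.h>

static void dump_prog_fdinfo(int prog_fd)
{
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", prog_fd);
        f = fopen(path, "r");
        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* prog_type:, prog_tag:, run_cnt:, ... */
        fclose(f);
}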
1812
1813 const struct file_operations bpf_prog_fops = {
1814 #ifdef CONFIG_PROC_FS
1815         .show_fdinfo    = bpf_prog_show_fdinfo,
1816 #endif
1817         .release        = bpf_prog_release,
1818         .read           = bpf_dummy_read,
1819         .write          = bpf_dummy_write,
1820 };
1821
1822 int bpf_prog_new_fd(struct bpf_prog *prog)
1823 {
1824         int ret;
1825
1826         ret = security_bpf_prog(prog);
1827         if (ret < 0)
1828                 return ret;
1829
1830         return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1831                                 O_RDWR | O_CLOEXEC);
1832 }
1833
1834 static struct bpf_prog *____bpf_prog_get(struct fd f)
1835 {
1836         if (!f.file)
1837                 return ERR_PTR(-EBADF);
1838         if (f.file->f_op != &bpf_prog_fops) {
1839                 fdput(f);
1840                 return ERR_PTR(-EINVAL);
1841         }
1842
1843         return f.file->private_data;
1844 }
1845
1846 void bpf_prog_add(struct bpf_prog *prog, int i)
1847 {
1848         atomic64_add(i, &prog->aux->refcnt);
1849 }
1850 EXPORT_SYMBOL_GPL(bpf_prog_add);
1851
1852 void bpf_prog_sub(struct bpf_prog *prog, int i)
1853 {
1854         /* Only to be used for undoing previous bpf_prog_add() in some
1855          * error path. We still know that another entity in our call
1856          * path holds a reference to the program, thus atomic_sub() can
1857          * be safely used in such cases!
1858          */
1859         WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1860 }
1861 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1862
1863 void bpf_prog_inc(struct bpf_prog *prog)
1864 {
1865         atomic64_inc(&prog->aux->refcnt);
1866 }
1867 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1868
1869 /* prog_idr_lock should have been held */
1870 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1871 {
1872         int refold;
1873
1874         refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1875
1876         if (!refold)
1877                 return ERR_PTR(-ENOENT);
1878
1879         return prog;
1880 }
1881 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1882
1883 bool bpf_prog_get_ok(struct bpf_prog *prog,
1884                             enum bpf_prog_type *attach_type, bool attach_drv)
1885 {
1886         /* not an attachment, just a refcount inc, always allow */
1887         if (!attach_type)
1888                 return true;
1889
1890         if (prog->type != *attach_type)
1891                 return false;
1892         if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1893                 return false;
1894
1895         return true;
1896 }
1897
1898 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1899                                        bool attach_drv)
1900 {
1901         struct fd f = fdget(ufd);
1902         struct bpf_prog *prog;
1903
1904         prog = ____bpf_prog_get(f);
1905         if (IS_ERR(prog))
1906                 return prog;
1907         if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1908                 prog = ERR_PTR(-EINVAL);
1909                 goto out;
1910         }
1911
1912         bpf_prog_inc(prog);
1913 out:
1914         fdput(f);
1915         return prog;
1916 }
1917
1918 struct bpf_prog *bpf_prog_get(u32 ufd)
1919 {
1920         return __bpf_prog_get(ufd, NULL, false);
1921 }
1922
1923 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1924                                        bool attach_drv)
1925 {
1926         return __bpf_prog_get(ufd, &type, attach_drv);
1927 }
1928 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1929
1930 /* Initially all BPF programs could be loaded without specifying
1931  * expected_attach_type. Later, for some of them, specifying expected_attach_type
1932  * at load time became required so that the program could be validated properly.
1933  * Program types that are allowed to be loaded both with and without (for
1934  * backward compatibility) expected_attach_type should have the default attach
1935  * type assigned to expected_attach_type for the latter case, so that it can
1936  * still be validated at attach time.
1937  *
1938  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if the
1939  * prog type requires it, but some of its attach types have to remain
1940  * backward compatible.
1941  */
1942 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1943 {
1944         switch (attr->prog_type) {
1945         case BPF_PROG_TYPE_CGROUP_SOCK:
1946                 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1947                  * exist, so checking for non-zero is the way to go here.
1948                  */
1949                 if (!attr->expected_attach_type)
1950                         attr->expected_attach_type =
1951                                 BPF_CGROUP_INET_SOCK_CREATE;
1952                 break;
1953         }
1954 }
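/*
 * Usage sketch (userspace, illustrative only): both attr setups below load an
 * equivalent BPF_PROG_TYPE_CGROUP_SOCK program, because the fixup above
 * defaults a zero expected_attach_type to BPF_CGROUP_INET_SOCK_CREATE. Only
 * the relevant fields are filled; see the full loader sketch after
 * bpf_prog_load() below. The helper name is hypothetical.
 */
#include <linux/bpf.h>
#include <stdbool.h>
#include <string.h>

static void fill_cgroup_sock_load_attr(union bpf_attr *attr, bool legacy)
{
        memset(attr, 0, sizeof(*attr));
        attr->prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
        /* legacy loaders leave this zero and rely on the in-kernel fixup */
        attr->expected_attach_type = legacy ? 0 : BPF_CGROUP_INET_SOCK_CREATE;
}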
1955
1956 static int
1957 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1958                            enum bpf_attach_type expected_attach_type,
1959                            u32 btf_id, u32 prog_fd)
1960 {
1961         if (btf_id) {
1962                 if (btf_id > BTF_MAX_TYPE)
1963                         return -EINVAL;
1964
1965                 switch (prog_type) {
1966                 case BPF_PROG_TYPE_TRACING:
1967                 case BPF_PROG_TYPE_LSM:
1968                 case BPF_PROG_TYPE_STRUCT_OPS:
1969                 case BPF_PROG_TYPE_EXT:
1970                         break;
1971                 default:
1972                         return -EINVAL;
1973                 }
1974         }
1975
1976         if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
1977             prog_type != BPF_PROG_TYPE_EXT)
1978                 return -EINVAL;
1979
1980         switch (prog_type) {
1981         case BPF_PROG_TYPE_CGROUP_SOCK:
1982                 switch (expected_attach_type) {
1983                 case BPF_CGROUP_INET_SOCK_CREATE:
1984                 case BPF_CGROUP_INET4_POST_BIND:
1985                 case BPF_CGROUP_INET6_POST_BIND:
1986                         return 0;
1987                 default:
1988                         return -EINVAL;
1989                 }
1990         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1991                 switch (expected_attach_type) {
1992                 case BPF_CGROUP_INET4_BIND:
1993                 case BPF_CGROUP_INET6_BIND:
1994                 case BPF_CGROUP_INET4_CONNECT:
1995                 case BPF_CGROUP_INET6_CONNECT:
1996                 case BPF_CGROUP_INET4_GETPEERNAME:
1997                 case BPF_CGROUP_INET6_GETPEERNAME:
1998                 case BPF_CGROUP_INET4_GETSOCKNAME:
1999                 case BPF_CGROUP_INET6_GETSOCKNAME:
2000                 case BPF_CGROUP_UDP4_SENDMSG:
2001                 case BPF_CGROUP_UDP6_SENDMSG:
2002                 case BPF_CGROUP_UDP4_RECVMSG:
2003                 case BPF_CGROUP_UDP6_RECVMSG:
2004                         return 0;
2005                 default:
2006                         return -EINVAL;
2007                 }
2008         case BPF_PROG_TYPE_CGROUP_SKB:
2009                 switch (expected_attach_type) {
2010                 case BPF_CGROUP_INET_INGRESS:
2011                 case BPF_CGROUP_INET_EGRESS:
2012                         return 0;
2013                 default:
2014                         return -EINVAL;
2015                 }
2016         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2017                 switch (expected_attach_type) {
2018                 case BPF_CGROUP_SETSOCKOPT:
2019                 case BPF_CGROUP_GETSOCKOPT:
2020                         return 0;
2021                 default:
2022                         return -EINVAL;
2023                 }
2024         case BPF_PROG_TYPE_EXT:
2025                 if (expected_attach_type)
2026                         return -EINVAL;
2027                 /* fallthrough */
2028         default:
2029                 return 0;
2030         }
2031 }
2032
2033 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2034 {
2035         switch (prog_type) {
2036         case BPF_PROG_TYPE_SCHED_CLS:
2037         case BPF_PROG_TYPE_SCHED_ACT:
2038         case BPF_PROG_TYPE_XDP:
2039         case BPF_PROG_TYPE_LWT_IN:
2040         case BPF_PROG_TYPE_LWT_OUT:
2041         case BPF_PROG_TYPE_LWT_XMIT:
2042         case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2043         case BPF_PROG_TYPE_SK_SKB:
2044         case BPF_PROG_TYPE_SK_MSG:
2045         case BPF_PROG_TYPE_LIRC_MODE2:
2046         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2047         case BPF_PROG_TYPE_CGROUP_DEVICE:
2048         case BPF_PROG_TYPE_CGROUP_SOCK:
2049         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2050         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2051         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2052         case BPF_PROG_TYPE_SOCK_OPS:
2053         case BPF_PROG_TYPE_EXT: /* extends any prog */
2054                 return true;
2055         case BPF_PROG_TYPE_CGROUP_SKB:
2056                 /* always unpriv */
2057         case BPF_PROG_TYPE_SK_REUSEPORT:
2058                 /* equivalent to SOCKET_FILTER. need CAP_BPF only */
2059         default:
2060                 return false;
2061         }
2062 }
2063
2064 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2065 {
2066         switch (prog_type) {
2067         case BPF_PROG_TYPE_KPROBE:
2068         case BPF_PROG_TYPE_TRACEPOINT:
2069         case BPF_PROG_TYPE_PERF_EVENT:
2070         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2071         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2072         case BPF_PROG_TYPE_TRACING:
2073         case BPF_PROG_TYPE_LSM:
2074         case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2075         case BPF_PROG_TYPE_EXT: /* extends any prog */
2076                 return true;
2077         default:
2078                 return false;
2079         }
2080 }
2081
2082 /* last field in 'union bpf_attr' used by this command */
2083 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2084
2085 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2086 {
2087         enum bpf_prog_type type = attr->prog_type;
2088         struct bpf_prog *prog;
2089         int err;
2090         char license[128];
2091         bool is_gpl;
2092
2093         if (CHECK_ATTR(BPF_PROG_LOAD))
2094                 return -EINVAL;
2095
2096         if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2097                                  BPF_F_ANY_ALIGNMENT |
2098                                  BPF_F_TEST_STATE_FREQ |
2099                                  BPF_F_TEST_RND_HI32))
2100                 return -EINVAL;
2101
2102         if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2103             (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2104             !bpf_capable())
2105                 return -EPERM;
2106
2107         /* copy eBPF program license from user space */
2108         if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2109                               sizeof(license) - 1) < 0)
2110                 return -EFAULT;
2111         license[sizeof(license) - 1] = 0;
2112
2113         /* eBPF programs must be GPL compatible to use GPL-ed functions */
2114         is_gpl = license_is_gpl_compatible(license);
2115
2116         if (attr->insn_cnt == 0 ||
2117             attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2118                 return -E2BIG;
2119         if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2120             type != BPF_PROG_TYPE_CGROUP_SKB &&
2121             !bpf_capable())
2122                 return -EPERM;
2123
2124         if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
2125                 return -EPERM;
2126         if (is_perfmon_prog_type(type) && !perfmon_capable())
2127                 return -EPERM;
2128
2129         bpf_prog_load_fixup_attach_type(attr);
2130         if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2131                                        attr->attach_btf_id,
2132                                        attr->attach_prog_fd))
2133                 return -EINVAL;
2134
2135         /* plain bpf_prog allocation */
2136         prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2137         if (!prog)
2138                 return -ENOMEM;
2139
2140         prog->expected_attach_type = attr->expected_attach_type;
2141         prog->aux->attach_btf_id = attr->attach_btf_id;
2142         if (attr->attach_prog_fd) {
2143                 struct bpf_prog *tgt_prog;
2144
2145                 tgt_prog = bpf_prog_get(attr->attach_prog_fd);
2146                 if (IS_ERR(tgt_prog)) {
2147                         err = PTR_ERR(tgt_prog);
2148                         goto free_prog_nouncharge;
2149                 }
2150                 prog->aux->linked_prog = tgt_prog;
2151         }
2152
2153         prog->aux->offload_requested = !!attr->prog_ifindex;
2154
2155         err = security_bpf_prog_alloc(prog->aux);
2156         if (err)
2157                 goto free_prog_nouncharge;
2158
2159         err = bpf_prog_charge_memlock(prog);
2160         if (err)
2161                 goto free_prog_sec;
2162
2163         prog->len = attr->insn_cnt;
2164
2165         err = -EFAULT;
2166         if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2167                            bpf_prog_insn_size(prog)) != 0)
2168                 goto free_prog;
2169
2170         prog->orig_prog = NULL;
2171         prog->jited = 0;
2172
2173         atomic64_set(&prog->aux->refcnt, 1);
2174         prog->gpl_compatible = is_gpl ? 1 : 0;
2175
2176         if (bpf_prog_is_dev_bound(prog->aux)) {
2177                 err = bpf_prog_offload_init(prog, attr);
2178                 if (err)
2179                         goto free_prog;
2180         }
2181
2182         /* look up the program type and assign its prog_ops */
2183         err = find_prog_type(type, prog);
2184         if (err < 0)
2185                 goto free_prog;
2186
2187         prog->aux->load_time = ktime_get_boottime_ns();
2188         err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2189                                sizeof(attr->prog_name));
2190         if (err < 0)
2191                 goto free_prog;
2192
2193         /* run eBPF verifier */
2194         err = bpf_check(&prog, attr, uattr);
2195         if (err < 0)
2196                 goto free_used_maps;
2197
2198         prog = bpf_prog_select_runtime(prog, &err);
2199         if (err < 0)
2200                 goto free_used_maps;
2201
2202         err = bpf_prog_alloc_id(prog);
2203         if (err)
2204                 goto free_used_maps;
2205
2206         /* Upon success of bpf_prog_alloc_id(), the BPF prog is
2207          * effectively publicly exposed. However, retrieving via
2208          * bpf_prog_get_fd_by_id() will take another reference,
2209          * therefore it cannot be gone underneath us.
2210          *
2211          * Only for the time /after/ successful bpf_prog_new_fd()
2212          * and before returning to userspace, we might just hold
2213          * one reference and any parallel close on that fd could
2214          * rip everything out. Hence, below notifications must
2215          * happen before bpf_prog_new_fd().
2216          *
2217          * Also, any failure handling from this point onwards must
2218          * be using bpf_prog_put() given the program is exposed.
2219          */
2220         bpf_prog_kallsyms_add(prog);
2221         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2222         bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2223
2224         err = bpf_prog_new_fd(prog);
2225         if (err < 0)
2226                 bpf_prog_put(prog);
2227         return err;
2228
2229 free_used_maps:
2230         /* In case we have subprogs, we need to wait for a grace
2231          * period before we can tear down JIT memory since symbols
2232          * are already exposed under kallsyms.
2233          */
2234         __bpf_prog_put_noref(prog, prog->aux->func_cnt);
2235         return err;
2236 free_prog:
2237         bpf_prog_uncharge_memlock(prog);
2238 free_prog_sec:
2239         security_bpf_prog_free(prog->aux);
2240 free_prog_nouncharge:
2241         bpf_prog_free(prog);
2242         return err;
2243 }
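/*
 * Usage sketch (userspace, illustrative only): about the smallest program
 * bpf_prog_load() accepts -- "r0 = 0; exit" under a GPL-compatible license.
 * Assumes the uapi <linux/bpf.h>; the helper names are hypothetical, not a
 * libbpf API.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static __u64 ptr_to_u64(const void *p)
{
        return (__u64)(unsigned long)p;
}

static int load_trivial_prog(void)
{
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                  .dst_reg = BPF_REG_0, .imm = 0 },     /* r0 = 0 */
                { .code = BPF_JMP | BPF_EXIT },         /* exit   */
        };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns = ptr_to_u64(insns);
        attr.insn_cnt = 2;
        attr.license = ptr_to_u64("GPL");       /* sets prog->gpl_compatible */

        /* returns a prog FD from bpf_prog_new_fd(), or -1 with errno set */
        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}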
2244
2245 #define BPF_OBJ_LAST_FIELD file_flags
2246
2247 static int bpf_obj_pin(const union bpf_attr *attr)
2248 {
2249         if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2250                 return -EINVAL;
2251
2252         return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2253 }
2254
2255 static int bpf_obj_get(const union bpf_attr *attr)
2256 {
2257         if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2258             attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2259                 return -EINVAL;
2260
2261         return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2262                                 attr->file_flags);
2263 }
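/*
 * Usage sketch (userspace, illustrative only): pinning an object into bpffs
 * and reopening it later. Assumes a bpffs mount (conventionally /sys/fs/bpf);
 * the wrapper names are hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int pin_fd(int fd, const char *path)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.bpf_fd = fd;
        attr.pathname = (__u64)(unsigned long)path;

        return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

static int get_pinned_fd(const char *path)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (__u64)(unsigned long)path;
        /* attr.file_flags may carry BPF_F_RDONLY/BPF_F_WRONLY for maps */

        return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}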
2264
2265 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2266                    const struct bpf_link_ops *ops, struct bpf_prog *prog)
2267 {
2268         atomic64_set(&link->refcnt, 1);
2269         link->type = type;
2270         link->id = 0;
2271         link->ops = ops;
2272         link->prog = prog;
2273 }
2274
2275 static void bpf_link_free_id(int id)
2276 {
2277         if (!id)
2278                 return;
2279
2280         spin_lock_bh(&link_idr_lock);
2281         idr_remove(&link_idr, id);
2282         spin_unlock_bh(&link_idr_lock);
2283 }
2284
2285 /* Clean up bpf_link and its corresponding anon_inode file and FD. Once the
2286  * anon_inode is created, bpf_link can't be just kfree()'d due to the deferred
2287  * anon_inode release() call. This helper marks bpf_link as
2288  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2289  * refcnt is not decremented; that is the responsibility of the calling code
2290  * that failed to complete bpf_link initialization.
2291  */
2292 void bpf_link_cleanup(struct bpf_link_primer *primer)
2293 {
2294         primer->link->prog = NULL;
2295         bpf_link_free_id(primer->id);
2296         fput(primer->file);
2297         put_unused_fd(primer->fd);
2298 }
2299
2300 void bpf_link_inc(struct bpf_link *link)
2301 {
2302         atomic64_inc(&link->refcnt);
2303 }
2304
2305 /* bpf_link_free is guaranteed to be called from process context */
2306 static void bpf_link_free(struct bpf_link *link)
2307 {
2308         bpf_link_free_id(link->id);
2309         if (link->prog) {
2310                 /* detach BPF program, clean up used resources */
2311                 link->ops->release(link);
2312                 bpf_prog_put(link->prog);
2313         }
2314         /* free bpf_link and its containing memory */
2315         link->ops->dealloc(link);
2316 }
2317
2318 static void bpf_link_put_deferred(struct work_struct *work)
2319 {
2320         struct bpf_link *link = container_of(work, struct bpf_link, work);
2321
2322         bpf_link_free(link);
2323 }
2324
2325 /* bpf_link_put can be called from atomic context, but ensures that resources
2326  * are freed from process context
2327  */
2328 void bpf_link_put(struct bpf_link *link)
2329 {
2330         if (!atomic64_dec_and_test(&link->refcnt))
2331                 return;
2332
2333         if (in_atomic()) {
2334                 INIT_WORK(&link->work, bpf_link_put_deferred);
2335                 schedule_work(&link->work);
2336         } else {
2337                 bpf_link_free(link);
2338         }
2339 }
2340
2341 static int bpf_link_release(struct inode *inode, struct file *filp)
2342 {
2343         struct bpf_link *link = filp->private_data;
2344
2345         bpf_link_put(link);
2346         return 0;
2347 }
2348
2349 #ifdef CONFIG_PROC_FS
2350 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2351 #define BPF_MAP_TYPE(_id, _ops)
2352 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2353 static const char *bpf_link_type_strs[] = {
2354         [BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2355 #include <linux/bpf_types.h>
2356 };
2357 #undef BPF_PROG_TYPE
2358 #undef BPF_MAP_TYPE
2359 #undef BPF_LINK_TYPE
2360
2361 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2362 {
2363         const struct bpf_link *link = filp->private_data;
2364         const struct bpf_prog *prog = link->prog;
2365         char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2366
2367         bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2368         seq_printf(m,
2369                    "link_type:\t%s\n"
2370                    "link_id:\t%u\n"
2371                    "prog_tag:\t%s\n"
2372                    "prog_id:\t%u\n",
2373                    bpf_link_type_strs[link->type],
2374                    link->id,
2375                    prog_tag,
2376                    prog->aux->id);
2377         if (link->ops->show_fdinfo)
2378                 link->ops->show_fdinfo(link, m);
2379 }
2380 #endif
2381
2382 static const struct file_operations bpf_link_fops = {
2383 #ifdef CONFIG_PROC_FS
2384         .show_fdinfo    = bpf_link_show_fdinfo,
2385 #endif
2386         .release        = bpf_link_release,
2387         .read           = bpf_dummy_read,
2388         .write          = bpf_dummy_write,
2389 };
2390
2391 static int bpf_link_alloc_id(struct bpf_link *link)
2392 {
2393         int id;
2394
2395         idr_preload(GFP_KERNEL);
2396         spin_lock_bh(&link_idr_lock);
2397         id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2398         spin_unlock_bh(&link_idr_lock);
2399         idr_preload_end();
2400
2401         return id;
2402 }
2403
2404 /* Prepare bpf_link to be exposed to user-space by allocating an anon_inode
2405  * file, reserving an unused FD and allocating an ID from link_idr. This is to
2406  * be paired with bpf_link_settle() to install the FD and ID and expose the
2407  * bpf_link to user-space, if the bpf_link is successfully attached. If not,
2408  * bpf_link and the pre-allocated resources are freed with bpf_link_cleanup().
2409  * All the transient state is passed around in struct bpf_link_primer. This is
2410  * the preferred way to create and initialize a bpf_link, especially when there
2411  * are complicated and expensive operations in between creating the bpf_link
2412  * itself and attaching it to the BPF hook. By using bpf_link_prime() and
2413  * bpf_link_settle(), kernel code need not perform expensive (and potentially
2414  * failing) roll-back operations in the rare case that the file, FD, or ID
2415  * can't be allocated; bpf_tracing_prog_attach() below is a canonical user.
2416  */
2417 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2418 {
2419         struct file *file;
2420         int fd, id;
2421
2422         fd = get_unused_fd_flags(O_CLOEXEC);
2423         if (fd < 0)
2424                 return fd;
2425
2427         id = bpf_link_alloc_id(link);
2428         if (id < 0) {
2429                 put_unused_fd(fd);
2430                 return id;
2431         }
2432
2433         file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2434         if (IS_ERR(file)) {
2435                 bpf_link_free_id(id);
2436                 put_unused_fd(fd);
2437                 return PTR_ERR(file);
2438         }
2439
2440         primer->link = link;
2441         primer->file = file;
2442         primer->fd = fd;
2443         primer->id = id;
2444         return 0;
2445 }
2446
2447 int bpf_link_settle(struct bpf_link_primer *primer)
2448 {
2449         /* make bpf_link fetchable by ID */
2450         spin_lock_bh(&link_idr_lock);
2451         primer->link->id = primer->id;
2452         spin_unlock_bh(&link_idr_lock);
2453         /* make bpf_link fetchable by FD */
2454         fd_install(primer->fd, primer->file);
2455         /* pass through installed FD */
2456         return primer->fd;
2457 }
2458
2459 int bpf_link_new_fd(struct bpf_link *link)
2460 {
2461         return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2462 }
2463
2464 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2465 {
2466         struct fd f = fdget(ufd);
2467         struct bpf_link *link;
2468
2469         if (!f.file)
2470                 return ERR_PTR(-EBADF);
2471         if (f.file->f_op != &bpf_link_fops) {
2472                 fdput(f);
2473                 return ERR_PTR(-EINVAL);
2474         }
2475
2476         link = f.file->private_data;
2477         bpf_link_inc(link);
2478         fdput(f);
2479
2480         return link;
2481 }
2482
2483 struct bpf_tracing_link {
2484         struct bpf_link link;
2485         enum bpf_attach_type attach_type;
2486 };
2487
2488 static void bpf_tracing_link_release(struct bpf_link *link)
2489 {
2490         WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
2491 }
2492
2493 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2494 {
2495         struct bpf_tracing_link *tr_link =
2496                 container_of(link, struct bpf_tracing_link, link);
2497
2498         kfree(tr_link);
2499 }
2500
2501 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2502                                          struct seq_file *seq)
2503 {
2504         struct bpf_tracing_link *tr_link =
2505                 container_of(link, struct bpf_tracing_link, link);
2506
2507         seq_printf(seq,
2508                    "attach_type:\t%d\n",
2509                    tr_link->attach_type);
2510 }
2511
2512 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2513                                            struct bpf_link_info *info)
2514 {
2515         struct bpf_tracing_link *tr_link =
2516                 container_of(link, struct bpf_tracing_link, link);
2517
2518         info->tracing.attach_type = tr_link->attach_type;
2519
2520         return 0;
2521 }
2522
2523 static const struct bpf_link_ops bpf_tracing_link_lops = {
2524         .release = bpf_tracing_link_release,
2525         .dealloc = bpf_tracing_link_dealloc,
2526         .show_fdinfo = bpf_tracing_link_show_fdinfo,
2527         .fill_link_info = bpf_tracing_link_fill_link_info,
2528 };
2529
2530 static int bpf_tracing_prog_attach(struct bpf_prog *prog)
2531 {
2532         struct bpf_link_primer link_primer;
2533         struct bpf_tracing_link *link;
2534         int err;
2535
2536         switch (prog->type) {
2537         case BPF_PROG_TYPE_TRACING:
2538                 if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2539                     prog->expected_attach_type != BPF_TRACE_FEXIT &&
2540                     prog->expected_attach_type != BPF_MODIFY_RETURN) {
2541                         err = -EINVAL;
2542                         goto out_put_prog;
2543                 }
2544                 break;
2545         case BPF_PROG_TYPE_EXT:
2546                 if (prog->expected_attach_type != 0) {
2547                         err = -EINVAL;
2548                         goto out_put_prog;
2549                 }
2550                 break;
2551         case BPF_PROG_TYPE_LSM:
2552                 if (prog->expected_attach_type != BPF_LSM_MAC) {
2553                         err = -EINVAL;
2554                         goto out_put_prog;
2555                 }
2556                 break;
2557         default:
2558                 err = -EINVAL;
2559                 goto out_put_prog;
2560         }
2561
2562         link = kzalloc(sizeof(*link), GFP_USER);
2563         if (!link) {
2564                 err = -ENOMEM;
2565                 goto out_put_prog;
2566         }
2567         bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2568                       &bpf_tracing_link_lops, prog);
2569         link->attach_type = prog->expected_attach_type;
2570
2571         err = bpf_link_prime(&link->link, &link_primer);
2572         if (err) {
2573                 kfree(link);
2574                 goto out_put_prog;
2575         }
2576
2577         err = bpf_trampoline_link_prog(prog);
2578         if (err) {
2579                 bpf_link_cleanup(&link_primer);
2580                 goto out_put_prog;
2581         }
2582
2583         return bpf_link_settle(&link_primer);
2584 out_put_prog:
2585         bpf_prog_put(prog);
2586         return err;
2587 }
2588
2589 struct bpf_raw_tp_link {
2590         struct bpf_link link;
2591         struct bpf_raw_event_map *btp;
2592 };
2593
2594 static void bpf_raw_tp_link_release(struct bpf_link *link)
2595 {
2596         struct bpf_raw_tp_link *raw_tp =
2597                 container_of(link, struct bpf_raw_tp_link, link);
2598
2599         bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2600         bpf_put_raw_tracepoint(raw_tp->btp);
2601 }
2602
2603 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2604 {
2605         struct bpf_raw_tp_link *raw_tp =
2606                 container_of(link, struct bpf_raw_tp_link, link);
2607
2608         kfree(raw_tp);
2609 }
2610
2611 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2612                                         struct seq_file *seq)
2613 {
2614         struct bpf_raw_tp_link *raw_tp_link =
2615                 container_of(link, struct bpf_raw_tp_link, link);
2616
2617         seq_printf(seq,
2618                    "tp_name:\t%s\n",
2619                    raw_tp_link->btp->tp->name);
2620 }
2621
2622 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2623                                           struct bpf_link_info *info)
2624 {
2625         struct bpf_raw_tp_link *raw_tp_link =
2626                 container_of(link, struct bpf_raw_tp_link, link);
2627         char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2628         const char *tp_name = raw_tp_link->btp->tp->name;
2629         u32 ulen = info->raw_tracepoint.tp_name_len;
2630         size_t tp_len = strlen(tp_name);
2631
2632         if (!ulen ^ !ubuf)      /* ulen == 0 with ubuf set would underflow ulen - 1 below */
2633                 return -EINVAL;
2634
2635         info->raw_tracepoint.tp_name_len = tp_len + 1;
2636
2637         if (!ubuf)
2638                 return 0;
2639
2640         if (ulen >= tp_len + 1) {
2641                 if (copy_to_user(ubuf, tp_name, tp_len + 1))
2642                         return -EFAULT;
2643         } else {
2644                 char zero = '\0';
2645
2646                 if (copy_to_user(ubuf, tp_name, ulen - 1))
2647                         return -EFAULT;
2648                 if (put_user(zero, ubuf + ulen - 1))
2649                         return -EFAULT;
2650                 return -ENOSPC;
2651         }
2652
2653         return 0;
2654 }
2655
2656 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2657         .release = bpf_raw_tp_link_release,
2658         .dealloc = bpf_raw_tp_link_dealloc,
2659         .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2660         .fill_link_info = bpf_raw_tp_link_fill_link_info,
2661 };
2662
2663 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2664
2665 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2666 {
2667         struct bpf_link_primer link_primer;
2668         struct bpf_raw_tp_link *link;
2669         struct bpf_raw_event_map *btp;
2670         struct bpf_prog *prog;
2671         const char *tp_name;
2672         char buf[128];
2673         int err;
2674
2675         if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2676                 return -EINVAL;
2677
2678         prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2679         if (IS_ERR(prog))
2680                 return PTR_ERR(prog);
2681
2682         switch (prog->type) {
2683         case BPF_PROG_TYPE_TRACING:
2684         case BPF_PROG_TYPE_EXT:
2685         case BPF_PROG_TYPE_LSM:
2686                 if (attr->raw_tracepoint.name) {
2687                         /* The attach point for this category of programs
2688                          * should be specified via btf_id during program load.
2689                          */
2690                         err = -EINVAL;
2691                         goto out_put_prog;
2692                 }
2693                 if (prog->type == BPF_PROG_TYPE_TRACING &&
2694                     prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2695                         tp_name = prog->aux->attach_func_name;
2696                         break;
2697                 }
2698                 return bpf_tracing_prog_attach(prog);
2699         case BPF_PROG_TYPE_RAW_TRACEPOINT:
2700         case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2701                 if (strncpy_from_user(buf,
2702                                       u64_to_user_ptr(attr->raw_tracepoint.name),
2703                                       sizeof(buf) - 1) < 0) {
2704                         err = -EFAULT;
2705                         goto out_put_prog;
2706                 }
2707                 buf[sizeof(buf) - 1] = 0;
2708                 tp_name = buf;
2709                 break;
2710         default:
2711                 err = -EINVAL;
2712                 goto out_put_prog;
2713         }
2714
2715         btp = bpf_get_raw_tracepoint(tp_name);
2716         if (!btp) {
2717                 err = -ENOENT;
2718                 goto out_put_prog;
2719         }
2720
2721         link = kzalloc(sizeof(*link), GFP_USER);
2722         if (!link) {
2723                 err = -ENOMEM;
2724                 goto out_put_btp;
2725         }
2726         bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2727                       &bpf_raw_tp_link_lops, prog);
2728         link->btp = btp;
2729
2730         err = bpf_link_prime(&link->link, &link_primer);
2731         if (err) {
2732                 kfree(link);
2733                 goto out_put_btp;
2734         }
2735
2736         err = bpf_probe_register(link->btp, prog);
2737         if (err) {
2738                 bpf_link_cleanup(&link_primer);
2739                 goto out_put_btp;
2740         }
2741
2742         return bpf_link_settle(&link_primer);
2743
2744 out_put_btp:
2745         bpf_put_raw_tracepoint(btp);
2746 out_put_prog:
2747         bpf_prog_put(prog);
2748         return err;
2749 }
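/*
 * Usage sketch (userspace, illustrative only): opening a raw tracepoint
 * link. For BPF_PROG_TYPE_RAW_TRACEPOINT the name is passed here; for
 * TRACING/EXT/LSM programs it must be NULL, since the target was fixed by
 * btf_id at load time. The tracepoint name and helper name are examples.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw_tp(int prog_fd, const char *tp_name)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
        attr.raw_tracepoint.prog_fd = prog_fd;

        /* returns a link FD; closing it detaches the program */
        return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}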
2750
2751 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2752                                              enum bpf_attach_type attach_type)
2753 {
2754         switch (prog->type) {
2755         case BPF_PROG_TYPE_CGROUP_SOCK:
2756         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2757         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2758                 return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2759         case BPF_PROG_TYPE_CGROUP_SKB:
2760                 if (!capable(CAP_NET_ADMIN))
2761                         /* cg-skb progs can be loaded by an unprivileged
2762                          * user, so check permissions at attach time.
2763                          */
2764                         return -EPERM;
2765                 return prog->enforce_expected_attach_type &&
2766                         prog->expected_attach_type != attach_type ?
2767                         -EINVAL : 0;
2768         default:
2769                 return 0;
2770         }
2771 }
2772
2773 static enum bpf_prog_type
2774 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2775 {
2776         switch (attach_type) {
2777         case BPF_CGROUP_INET_INGRESS:
2778         case BPF_CGROUP_INET_EGRESS:
2779                 return BPF_PROG_TYPE_CGROUP_SKB;
2781         case BPF_CGROUP_INET_SOCK_CREATE:
2782         case BPF_CGROUP_INET4_POST_BIND:
2783         case BPF_CGROUP_INET6_POST_BIND:
2784                 return BPF_PROG_TYPE_CGROUP_SOCK;
2785         case BPF_CGROUP_INET4_BIND:
2786         case BPF_CGROUP_INET6_BIND:
2787         case BPF_CGROUP_INET4_CONNECT:
2788         case BPF_CGROUP_INET6_CONNECT:
2789         case BPF_CGROUP_INET4_GETPEERNAME:
2790         case BPF_CGROUP_INET6_GETPEERNAME:
2791         case BPF_CGROUP_INET4_GETSOCKNAME:
2792         case BPF_CGROUP_INET6_GETSOCKNAME:
2793         case BPF_CGROUP_UDP4_SENDMSG:
2794         case BPF_CGROUP_UDP6_SENDMSG:
2795         case BPF_CGROUP_UDP4_RECVMSG:
2796         case BPF_CGROUP_UDP6_RECVMSG:
2797                 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2798         case BPF_CGROUP_SOCK_OPS:
2799                 return BPF_PROG_TYPE_SOCK_OPS;
2800         case BPF_CGROUP_DEVICE:
2801                 return BPF_PROG_TYPE_CGROUP_DEVICE;
2802         case BPF_SK_MSG_VERDICT:
2803                 return BPF_PROG_TYPE_SK_MSG;
2804         case BPF_SK_SKB_STREAM_PARSER:
2805         case BPF_SK_SKB_STREAM_VERDICT:
2806                 return BPF_PROG_TYPE_SK_SKB;
2807         case BPF_LIRC_MODE2:
2808                 return BPF_PROG_TYPE_LIRC_MODE2;
2809         case BPF_FLOW_DISSECTOR:
2810                 return BPF_PROG_TYPE_FLOW_DISSECTOR;
2811         case BPF_CGROUP_SYSCTL:
2812                 return BPF_PROG_TYPE_CGROUP_SYSCTL;
2813         case BPF_CGROUP_GETSOCKOPT:
2814         case BPF_CGROUP_SETSOCKOPT:
2815                 return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2816         case BPF_TRACE_ITER:
2817                 return BPF_PROG_TYPE_TRACING;
2818         default:
2819                 return BPF_PROG_TYPE_UNSPEC;
2820         }
2821 }
2822
2823 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2824
2825 #define BPF_F_ATTACH_MASK \
2826         (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2827
2828 static int bpf_prog_attach(const union bpf_attr *attr)
2829 {
2830         enum bpf_prog_type ptype;
2831         struct bpf_prog *prog;
2832         int ret;
2833
2834         if (CHECK_ATTR(BPF_PROG_ATTACH))
2835                 return -EINVAL;
2836
2837         if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2838                 return -EINVAL;
2839
2840         ptype = attach_type_to_prog_type(attr->attach_type);
2841         if (ptype == BPF_PROG_TYPE_UNSPEC)
2842                 return -EINVAL;
2843
2844         prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2845         if (IS_ERR(prog))
2846                 return PTR_ERR(prog);
2847
2848         if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2849                 bpf_prog_put(prog);
2850                 return -EINVAL;
2851         }
2852
2853         switch (ptype) {
2854         case BPF_PROG_TYPE_SK_SKB:
2855         case BPF_PROG_TYPE_SK_MSG:
2856                 ret = sock_map_get_from_fd(attr, prog);
2857                 break;
2858         case BPF_PROG_TYPE_LIRC_MODE2:
2859                 ret = lirc_prog_attach(attr, prog);
2860                 break;
2861         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2862                 ret = netns_bpf_prog_attach(attr, prog);
2863                 break;
2864         case BPF_PROG_TYPE_CGROUP_DEVICE:
2865         case BPF_PROG_TYPE_CGROUP_SKB:
2866         case BPF_PROG_TYPE_CGROUP_SOCK:
2867         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2868         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2869         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2870         case BPF_PROG_TYPE_SOCK_OPS:
2871                 ret = cgroup_bpf_prog_attach(attr, ptype, prog);
2872                 break;
2873         default:
2874                 ret = -EINVAL;
2875         }
2876
2877         if (ret)
2878                 bpf_prog_put(prog);
2879         return ret;
2880 }
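/*
 * Usage sketch (userspace, illustrative only): attaching a cgroup program.
 * target_fd is an open cgroup-v2 directory FD, and attach_type must match
 * what the program was loaded for, per
 * bpf_prog_attach_check_attach_type() above. The helper name is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_cgroup_prog(int cgroup_fd, int prog_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = cgroup_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = BPF_CGROUP_INET_EGRESS;
        attr.attach_flags = BPF_F_ALLOW_MULTI;

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}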
2881
2882 #define BPF_PROG_DETACH_LAST_FIELD attach_type
2883
2884 static int bpf_prog_detach(const union bpf_attr *attr)
2885 {
2886         enum bpf_prog_type ptype;
2887
2888         if (CHECK_ATTR(BPF_PROG_DETACH))
2889                 return -EINVAL;
2890
2891         ptype = attach_type_to_prog_type(attr->attach_type);
2892
2893         switch (ptype) {
2894         case BPF_PROG_TYPE_SK_MSG:
2895         case BPF_PROG_TYPE_SK_SKB:
2896                 return sock_map_get_from_fd(attr, NULL);
2897         case BPF_PROG_TYPE_LIRC_MODE2:
2898                 return lirc_prog_detach(attr);
2899         case BPF_PROG_TYPE_FLOW_DISSECTOR:
2900                 if (!capable(CAP_NET_ADMIN))
2901                         return -EPERM;
2902                 return netns_bpf_prog_detach(attr);
2903         case BPF_PROG_TYPE_CGROUP_DEVICE:
2904         case BPF_PROG_TYPE_CGROUP_SKB:
2905         case BPF_PROG_TYPE_CGROUP_SOCK:
2906         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2907         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2908         case BPF_PROG_TYPE_CGROUP_SYSCTL:
2909         case BPF_PROG_TYPE_SOCK_OPS:
2910                 return cgroup_bpf_prog_detach(attr, ptype);
2911         default:
2912                 return -EINVAL;
2913         }
2914 }
2915
2916 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2917
2918 static int bpf_prog_query(const union bpf_attr *attr,
2919                           union bpf_attr __user *uattr)
2920 {
2921         if (!capable(CAP_NET_ADMIN))
2922                 return -EPERM;
2923         if (CHECK_ATTR(BPF_PROG_QUERY))
2924                 return -EINVAL;
2925         if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2926                 return -EINVAL;
2927
2928         switch (attr->query.attach_type) {
2929         case BPF_CGROUP_INET_INGRESS:
2930         case BPF_CGROUP_INET_EGRESS:
2931         case BPF_CGROUP_INET_SOCK_CREATE:
2932         case BPF_CGROUP_INET4_BIND:
2933         case BPF_CGROUP_INET6_BIND:
2934         case BPF_CGROUP_INET4_POST_BIND:
2935         case BPF_CGROUP_INET6_POST_BIND:
2936         case BPF_CGROUP_INET4_CONNECT:
2937         case BPF_CGROUP_INET6_CONNECT:
2938         case BPF_CGROUP_INET4_GETPEERNAME:
2939         case BPF_CGROUP_INET6_GETPEERNAME:
2940         case BPF_CGROUP_INET4_GETSOCKNAME:
2941         case BPF_CGROUP_INET6_GETSOCKNAME:
2942         case BPF_CGROUP_UDP4_SENDMSG:
2943         case BPF_CGROUP_UDP6_SENDMSG:
2944         case BPF_CGROUP_UDP4_RECVMSG:
2945         case BPF_CGROUP_UDP6_RECVMSG:
2946         case BPF_CGROUP_SOCK_OPS:
2947         case BPF_CGROUP_DEVICE:
2948         case BPF_CGROUP_SYSCTL:
2949         case BPF_CGROUP_GETSOCKOPT:
2950         case BPF_CGROUP_SETSOCKOPT:
2951                 return cgroup_bpf_prog_query(attr, uattr);
2952         case BPF_LIRC_MODE2:
2953                 return lirc_prog_query(attr, uattr);
2954         case BPF_FLOW_DISSECTOR:
2955                 return netns_bpf_prog_query(attr, uattr);
2956         default:
2957                 return -EINVAL;
2958         }
2959 }
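/*
 * Usage sketch (userspace, illustrative only): querying the programs
 * attached to a cgroup. prog_cnt is in/out -- it caps the prog_ids buffer
 * on entry and holds the number of attached programs on return. The helper
 * name is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int query_cgroup_progs(int cgroup_fd, __u32 *ids, __u32 *cnt)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.query.target_fd = cgroup_fd;
        attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
        attr.query.prog_ids = (__u64)(unsigned long)ids;
        attr.query.prog_cnt = *cnt;

        err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
        if (!err)
                *cnt = attr.query.prog_cnt;
        return err;
}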
2960
2961 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2962
2963 static int bpf_prog_test_run(const union bpf_attr *attr,
2964                              union bpf_attr __user *uattr)
2965 {
2966         struct bpf_prog *prog;
2967         int ret = -ENOTSUPP;
2968
2969         if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2970                 return -EINVAL;
2971
2972         if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2973             (!attr->test.ctx_size_in && attr->test.ctx_in))
2974                 return -EINVAL;
2975
2976         if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2977             (!attr->test.ctx_size_out && attr->test.ctx_out))
2978                 return -EINVAL;
2979
2980         prog = bpf_prog_get(attr->test.prog_fd);
2981         if (IS_ERR(prog))
2982                 return PTR_ERR(prog);
2983
2984         if (prog->aux->ops->test_run)
2985                 ret = prog->aux->ops->test_run(prog, attr, uattr);
2986
2987         bpf_prog_put(prog);
2988         return ret;
2989 }
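/*
 * Usage sketch (userspace, illustrative only): test-running a program over a
 * packet buffer. Only program types whose ops implement ->test_run support
 * this (e.g. SOCKET_FILTER, SCHED_CLS, XDP); the return value and duration
 * come back through attr. The helper name is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int test_run_prog(int prog_fd, void *pkt, __u32 pkt_len, __u32 *retval)
{
        union bpf_attr attr;
        int err;

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd = prog_fd;
        attr.test.data_in = (__u64)(unsigned long)pkt;
        attr.test.data_size_in = pkt_len;
        attr.test.repeat = 1;

        err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
        if (!err)
                *retval = attr.test.retval;
        return err;
}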
2990
2991 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
2992
2993 static int bpf_obj_get_next_id(const union bpf_attr *attr,
2994                                union bpf_attr __user *uattr,
2995                                struct idr *idr,
2996                                spinlock_t *lock)
2997 {
2998         u32 next_id = attr->start_id;
2999         int err = 0;
3000
3001         if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3002                 return -EINVAL;
3003
3004         if (!capable(CAP_SYS_ADMIN))
3005                 return -EPERM;
3006
3007         next_id++;
3008         spin_lock_bh(lock);
3009         if (!idr_get_next(idr, &next_id))
3010                 err = -ENOENT;
3011         spin_unlock_bh(lock);
3012
3013         if (!err)
3014                 err = put_user(next_id, &uattr->next_id);
3015
3016         return err;
3017 }
3018
3019 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3020 {
3021         struct bpf_map *map;
3022
3023         spin_lock_bh(&map_idr_lock);
3024 again:
3025         map = idr_get_next(&map_idr, id);
3026         if (map) {
3027                 map = __bpf_map_inc_not_zero(map, false);
3028                 if (IS_ERR(map)) {
3029                         (*id)++;
3030                         goto again;
3031                 }
3032         }
3033         spin_unlock_bh(&map_idr_lock);
3034
3035         return map;
3036 }
3037
3038 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3039
3040 struct bpf_prog *bpf_prog_by_id(u32 id)
3041 {
3042         struct bpf_prog *prog;
3043
3044         if (!id)
3045                 return ERR_PTR(-ENOENT);
3046
3047         spin_lock_bh(&prog_idr_lock);
3048         prog = idr_find(&prog_idr, id);
3049         if (prog)
3050                 prog = bpf_prog_inc_not_zero(prog);
3051         else
3052                 prog = ERR_PTR(-ENOENT);
3053         spin_unlock_bh(&prog_idr_lock);
3054         return prog;
3055 }
3056
3057 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3058 {
3059         struct bpf_prog *prog;
3060         u32 id = attr->prog_id;
3061         int fd;
3062
3063         if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3064                 return -EINVAL;
3065
3066         if (!capable(CAP_SYS_ADMIN))
3067                 return -EPERM;
3068
3069         prog = bpf_prog_by_id(id);
3070         if (IS_ERR(prog))
3071                 return PTR_ERR(prog);
3072
3073         fd = bpf_prog_new_fd(prog);
3074         if (fd < 0)
3075                 bpf_prog_put(prog);
3076
3077         return fd;
3078 }
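/*
 * Usage sketch (userspace, illustrative only): walking all loaded programs
 * by ID and turning each ID into an FD. Both commands require CAP_SYS_ADMIN,
 * as enforced above. The helper name is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void walk_progs(void)
{
        union bpf_attr attr;
        __u32 id = 0;
        int fd;

        for (;;) {
                memset(&attr, 0, sizeof(attr));
                attr.start_id = id;
                if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
                        break;  /* -ENOENT: no more programs */
                id = attr.next_id;

                memset(&attr, 0, sizeof(attr));
                attr.prog_id = id;
                fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
                if (fd < 0)
                        continue;       /* raced with an unload */
                /* ... inspect via BPF_OBJ_GET_INFO_BY_FD ... */
                close(fd);
        }
}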
3079
3080 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3081
3082 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3083 {
3084         struct bpf_map *map;
3085         u32 id = attr->map_id;
3086         int f_flags;
3087         int fd;
3088
3089         if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3090             attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3091                 return -EINVAL;
3092
3093         if (!capable(CAP_SYS_ADMIN))
3094                 return -EPERM;
3095
3096         f_flags = bpf_get_file_flag(attr->open_flags);
3097         if (f_flags < 0)
3098                 return f_flags;
3099
3100         spin_lock_bh(&map_idr_lock);
3101         map = idr_find(&map_idr, id);
3102         if (map)
3103                 map = __bpf_map_inc_not_zero(map, true);
3104         else
3105                 map = ERR_PTR(-ENOENT);
3106         spin_unlock_bh(&map_idr_lock);
3107
3108         if (IS_ERR(map))
3109                 return PTR_ERR(map);
3110
3111         fd = bpf_map_new_fd(map, f_flags);
3112         if (fd < 0)
3113                 bpf_map_put_with_uref(map);
3114
3115         return fd;
3116 }
3117
3118 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3119                                               unsigned long addr, u32 *off,
3120                                               u32 *type)
3121 {
3122         const struct bpf_map *map;
3123         int i;
3124
3125         for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3126                 map = prog->aux->used_maps[i];
3127                 if (map == (void *)addr) {
3128                         *type = BPF_PSEUDO_MAP_FD;
3129                         return map;
3130                 }
3131                 if (!map->ops->map_direct_value_meta)
3132                         continue;
3133                 if (!map->ops->map_direct_value_meta(map, addr, off)) {
3134                         *type = BPF_PSEUDO_MAP_VALUE;
3135                         return map;
3136                 }
3137         }
3138
3139         return NULL;
3140 }
3141
3142 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
3143 {
3144         const struct bpf_map *map;
3145         struct bpf_insn *insns;
3146         u32 off, type;
3147         u64 imm;
3148         u8 code;
3149         int i;
3150
3151         insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3152                         GFP_USER);
3153         if (!insns)
3154                 return insns;
3155
3156         for (i = 0; i < prog->len; i++) {
3157                 code = insns[i].code;
3158
3159                 if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3160                         insns[i].code = BPF_JMP | BPF_CALL;
3161                         insns[i].imm = BPF_FUNC_tail_call;
3162                         /* fall-through */
3163                 }
3164                 if (code == (BPF_JMP | BPF_CALL) ||
3165                     code == (BPF_JMP | BPF_CALL_ARGS)) {
3166                         if (code == (BPF_JMP | BPF_CALL_ARGS))
3167                                 insns[i].code = BPF_JMP | BPF_CALL;
3168                         if (!bpf_dump_raw_ok())
3169                                 insns[i].imm = 0;
3170                         continue;
3171                 }
3172                 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3173                         insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3174                         continue;
3175                 }
3176
3177                 if (code != (BPF_LD | BPF_IMM | BPF_DW))
3178                         continue;
3179
3180                 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3181                 map = bpf_map_from_imm(prog, imm, &off, &type);
3182                 if (map) {
3183                         insns[i].src_reg = type;
3184                         insns[i].imm = map->id;
3185                         insns[i + 1].imm = off;
3186                         continue;
3187                 }
3188         }
3189
3190         return insns;
3191 }
3192
3193 static int set_info_rec_size(struct bpf_prog_info *info)
3194 {
3195         /*
3196          * Ensure each info.*_rec_size matches the size the kernel expects,
3197          *
3198          * or
3199          *
3200          * allow a zero *_rec_size only when the matching _cnt is also
3201          * zero. In that case, the kernel writes the expected _rec_size
3202          * back into the info.
3203          */
3204
3205         if ((info->nr_func_info || info->func_info_rec_size) &&
3206             info->func_info_rec_size != sizeof(struct bpf_func_info))
3207                 return -EINVAL;
3208
3209         if ((info->nr_line_info || info->line_info_rec_size) &&
3210             info->line_info_rec_size != sizeof(struct bpf_line_info))
3211                 return -EINVAL;
3212
3213         if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3214             info->jited_line_info_rec_size != sizeof(__u64))
3215                 return -EINVAL;
3216
3217         info->func_info_rec_size = sizeof(struct bpf_func_info);
3218         info->line_info_rec_size = sizeof(struct bpf_line_info);
3219         info->jited_line_info_rec_size = sizeof(__u64);
3220
3221         return 0;
3222 }
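
     /*
      * A minimal user-space sketch of the contract above (hypothetical
      * caller, not part of this file): probe with a zeroed bpf_prog_info
      * first, then re-zero it and ask only for func_info, so no other
      * dump length is left non-zero without a buffer behind it.
      *
      *        struct bpf_prog_info info = {};
      *        union bpf_attr attr = {
      *                .info.bpf_fd   = prog_fd,
      *                .info.info_len = sizeof(info),
      *                .info.info     = (__u64)(uintptr_t)&info,
      *        };
      *        __u32 cnt;
      *
      *        syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
      *        cnt = info.nr_func_info;
      *        memset(&info, 0, sizeof(info));
      *        info.nr_func_info = cnt;
      *        info.func_info_rec_size = sizeof(struct bpf_func_info);
      *        info.func_info = (__u64)(uintptr_t)calloc(cnt, sizeof(struct bpf_func_info));
      *        syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
      */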
3223
3224 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
3225                                    const union bpf_attr *attr,
3226                                    union bpf_attr __user *uattr)
3227 {
3228         struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3229         struct bpf_prog_info info;
3230         u32 info_len = attr->info.info_len;
3231         struct bpf_prog_stats stats;
3232         char __user *uinsns;
3233         u32 ulen;
3234         int err;
3235
3236         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3237         if (err)
3238                 return err;
3239         info_len = min_t(u32, sizeof(info), info_len);
3240
3241         memset(&info, 0, sizeof(info));
3242         if (copy_from_user(&info, uinfo, info_len))
3243                 return -EFAULT;
3244
3245         info.type = prog->type;
3246         info.id = prog->aux->id;
3247         info.load_time = prog->aux->load_time;
3248         info.created_by_uid = from_kuid_munged(current_user_ns(),
3249                                                prog->aux->user->uid);
3250         info.gpl_compatible = prog->gpl_compatible;
3251
3252         memcpy(info.tag, prog->tag, sizeof(prog->tag));
3253         memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3254
3255         ulen = info.nr_map_ids;
3256         info.nr_map_ids = prog->aux->used_map_cnt;
3257         ulen = min_t(u32, info.nr_map_ids, ulen);
3258         if (ulen) {
3259                 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3260                 u32 i;
3261
3262                 for (i = 0; i < ulen; i++)
3263                         if (put_user(prog->aux->used_maps[i]->id,
3264                                      &user_map_ids[i]))
3265                                 return -EFAULT;
3266         }
3267
3268         err = set_info_rec_size(&info);
3269         if (err)
3270                 return err;
3271
3272         bpf_prog_get_stats(prog, &stats);
3273         info.run_time_ns = stats.nsecs;
3274         info.run_cnt = stats.cnt;
3275
3276         if (!bpf_capable()) {
3277                 info.jited_prog_len = 0;
3278                 info.xlated_prog_len = 0;
3279                 info.nr_jited_ksyms = 0;
3280                 info.nr_jited_func_lens = 0;
3281                 info.nr_func_info = 0;
3282                 info.nr_line_info = 0;
3283                 info.nr_jited_line_info = 0;
3284                 goto done;
3285         }
3286
3287         ulen = info.xlated_prog_len;
3288         info.xlated_prog_len = bpf_prog_insn_size(prog);
3289         if (info.xlated_prog_len && ulen) {
3290                 struct bpf_insn *insns_sanitized;
3291                 bool fault;
3292
3293                 if (prog->blinded && !bpf_dump_raw_ok()) {
3294                         info.xlated_prog_insns = 0;
3295                         goto done;
3296                 }
3297                 insns_sanitized = bpf_insn_prepare_dump(prog);
3298                 if (!insns_sanitized)
3299                         return -ENOMEM;
3300                 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3301                 ulen = min_t(u32, info.xlated_prog_len, ulen);
3302                 fault = copy_to_user(uinsns, insns_sanitized, ulen);
3303                 kfree(insns_sanitized);
3304                 if (fault)
3305                         return -EFAULT;
3306         }
3307
3308         if (bpf_prog_is_dev_bound(prog->aux)) {
3309                 err = bpf_prog_offload_info_fill(&info, prog);
3310                 if (err)
3311                         return err;
3312                 goto done;
3313         }
3314
3315         /* NOTE: the remainder of this function is skipped for device-bound
3316          * (offloaded) programs; bpf_prog_offload_info_fill() above is where
3317          * the equivalent fields get filled in for offload.
3318          */
3319         ulen = info.jited_prog_len;
3320         if (prog->aux->func_cnt) {
3321                 u32 i;
3322
3323                 info.jited_prog_len = 0;
3324                 for (i = 0; i < prog->aux->func_cnt; i++)
3325                         info.jited_prog_len += prog->aux->func[i]->jited_len;
3326         } else {
3327                 info.jited_prog_len = prog->jited_len;
3328         }
3329
3330         if (info.jited_prog_len && ulen) {
3331                 if (bpf_dump_raw_ok()) {
3332                         uinsns = u64_to_user_ptr(info.jited_prog_insns);
3333                         ulen = min_t(u32, info.jited_prog_len, ulen);
3334
3335                         /* for multi-function programs, copy the JITed
3336                          * instructions for all the functions
3337                          */
3338                         if (prog->aux->func_cnt) {
3339                                 u32 len, free, i;
3340                                 u8 *img;
3341
3342                                 free = ulen;
3343                                 for (i = 0; i < prog->aux->func_cnt; i++) {
3344                                         len = prog->aux->func[i]->jited_len;
3345                                         len = min_t(u32, len, free);
3346                                         img = (u8 *) prog->aux->func[i]->bpf_func;
3347                                         if (copy_to_user(uinsns, img, len))
3348                                                 return -EFAULT;
3349                                         uinsns += len;
3350                                         free -= len;
3351                                         if (!free)
3352                                                 break;
3353                                 }
3354                         } else {
3355                                 if (copy_to_user(uinsns, prog->bpf_func, ulen))
3356                                         return -EFAULT;
3357                         }
3358                 } else {
3359                         info.jited_prog_insns = 0;
3360                 }
3361         }
3362
3363         ulen = info.nr_jited_ksyms;
3364         info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3365         if (ulen) {
3366                 if (bpf_dump_raw_ok()) {
3367                         unsigned long ksym_addr;
3368                         u64 __user *user_ksyms;
3369                         u32 i;
3370
3371                         /* copy the address of the kernel symbol
3372                          * corresponding to each function
3373                          */
3374                         ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3375                         user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3376                         if (prog->aux->func_cnt) {
3377                                 for (i = 0; i < ulen; i++) {
3378                                         ksym_addr = (unsigned long)
3379                                                 prog->aux->func[i]->bpf_func;
3380                                         if (put_user((u64) ksym_addr,
3381                                                      &user_ksyms[i]))
3382                                                 return -EFAULT;
3383                                 }
3384                         } else {
3385                                 ksym_addr = (unsigned long) prog->bpf_func;
3386                                 if (put_user((u64) ksym_addr, &user_ksyms[0]))
3387                                         return -EFAULT;
3388                         }
3389                 } else {
3390                         info.jited_ksyms = 0;
3391                 }
3392         }
3393
3394         ulen = info.nr_jited_func_lens;
3395         info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3396         if (ulen) {
3397                 if (bpf_dump_raw_ok()) {
3398                         u32 __user *user_lens;
3399                         u32 func_len, i;
3400
3401                         /* copy the JITed image lengths for each function */
3402                         ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3403                         user_lens = u64_to_user_ptr(info.jited_func_lens);
3404                         if (prog->aux->func_cnt) {
3405                                 for (i = 0; i < ulen; i++) {
3406                                         func_len =
3407                                                 prog->aux->func[i]->jited_len;
3408                                         if (put_user(func_len, &user_lens[i]))
3409                                                 return -EFAULT;
3410                                 }
3411                         } else {
3412                                 func_len = prog->jited_len;
3413                                 if (put_user(func_len, &user_lens[0]))
3414                                         return -EFAULT;
3415                         }
3416                 } else {
3417                         info.jited_func_lens = 0;
3418                 }
3419         }
3420
3421         if (prog->aux->btf)
3422                 info.btf_id = btf_id(prog->aux->btf);
3423
3424         ulen = info.nr_func_info;
3425         info.nr_func_info = prog->aux->func_info_cnt;
3426         if (info.nr_func_info && ulen) {
3427                 char __user *user_finfo;
3428
3429                 user_finfo = u64_to_user_ptr(info.func_info);
3430                 ulen = min_t(u32, info.nr_func_info, ulen);
3431                 if (copy_to_user(user_finfo, prog->aux->func_info,
3432                                  info.func_info_rec_size * ulen))
3433                         return -EFAULT;
3434         }
3435
3436         ulen = info.nr_line_info;
3437         info.nr_line_info = prog->aux->nr_linfo;
3438         if (info.nr_line_info && ulen) {
3439                 __u8 __user *user_linfo;
3440
3441                 user_linfo = u64_to_user_ptr(info.line_info);
3442                 ulen = min_t(u32, info.nr_line_info, ulen);
3443                 if (copy_to_user(user_linfo, prog->aux->linfo,
3444                                  info.line_info_rec_size * ulen))
3445                         return -EFAULT;
3446         }
3447
3448         ulen = info.nr_jited_line_info;
3449         if (prog->aux->jited_linfo)
3450                 info.nr_jited_line_info = prog->aux->nr_linfo;
3451         else
3452                 info.nr_jited_line_info = 0;
3453         if (info.nr_jited_line_info && ulen) {
3454                 if (bpf_dump_raw_ok()) {
3455                         __u64 __user *user_linfo;
3456                         u32 i;
3457
3458                         user_linfo = u64_to_user_ptr(info.jited_line_info);
3459                         ulen = min_t(u32, info.nr_jited_line_info, ulen);
3460                         for (i = 0; i < ulen; i++) {
3461                                 if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3462                                              &user_linfo[i]))
3463                                         return -EFAULT;
3464                         }
3465                 } else {
3466                         info.jited_line_info = 0;
3467                 }
3468         }
3469
3470         ulen = info.nr_prog_tags;
3471         info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3472         if (ulen) {
3473                 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3474                 u32 i;
3475
3476                 user_prog_tags = u64_to_user_ptr(info.prog_tags);
3477                 ulen = min_t(u32, info.nr_prog_tags, ulen);
3478                 if (prog->aux->func_cnt) {
3479                         for (i = 0; i < ulen; i++) {
3480                                 if (copy_to_user(user_prog_tags[i],
3481                                                  prog->aux->func[i]->tag,
3482                                                  BPF_TAG_SIZE))
3483                                         return -EFAULT;
3484                         }
3485                 } else {
3486                         if (copy_to_user(user_prog_tags[0],
3487                                          prog->tag, BPF_TAG_SIZE))
3488                                 return -EFAULT;
3489                 }
3490         }
3491
3492 done:
3493         if (copy_to_user(uinfo, &info, info_len) ||
3494             put_user(info_len, &uattr->info.info_len))
3495                 return -EFAULT;
3496
3497         return 0;
3498 }
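
     /*
      * Behavior worth noting for the function above (summary, not new
      * policy): callers without bpf_capable() get every dump length
      * forced to zero via the "done" path, and even capable callers lose
      * jited images, ksym addresses and jited line info unless
      * bpf_dump_raw_ok() permits exposing kernel addresses.  Buffers
      * shorter than the reported lengths produce truncated copies, not
      * errors, hence the usual probe-then-fetch calling pattern.
      */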
3499
3500 static int bpf_map_get_info_by_fd(struct bpf_map *map,
3501                                   const union bpf_attr *attr,
3502                                   union bpf_attr __user *uattr)
3503 {
3504         struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3505         struct bpf_map_info info;
3506         u32 info_len = attr->info.info_len;
3507         int err;
3508
3509         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3510         if (err)
3511                 return err;
3512         info_len = min_t(u32, sizeof(info), info_len);
3513
3514         memset(&info, 0, sizeof(info));
3515         info.type = map->map_type;
3516         info.id = map->id;
3517         info.key_size = map->key_size;
3518         info.value_size = map->value_size;
3519         info.max_entries = map->max_entries;
3520         info.map_flags = map->map_flags;
3521         memcpy(info.name, map->name, sizeof(map->name));
3522
3523         if (map->btf) {
3524                 info.btf_id = btf_id(map->btf);
3525                 info.btf_key_type_id = map->btf_key_type_id;
3526                 info.btf_value_type_id = map->btf_value_type_id;
3527         }
3528         info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3529
3530         if (bpf_map_is_dev_bound(map)) {
3531                 err = bpf_map_offload_info_fill(&info, map);
3532                 if (err)
3533                         return err;
3534         }
3535
3536         if (copy_to_user(uinfo, &info, info_len) ||
3537             put_user(info_len, &uattr->info.info_len))
3538                 return -EFAULT;
3539
3540         return 0;
3541 }
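
     /*
      * Usage sketch (hypothetical user-space helper mirroring the copy
      * above):
      *
      *        struct bpf_map_info info = {};
      *        union bpf_attr attr = {
      *                .info.bpf_fd   = map_fd,
      *                .info.info_len = sizeof(info),
      *                .info.info     = (__u64)(uintptr_t)&info,
      *        };
      *
      *        if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
      *                printf("map %u (%s): key %u value %u max %u\n", info.id,
      *                       info.name, info.key_size, info.value_size,
      *                       info.max_entries);
      */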
3542
3543 static int bpf_btf_get_info_by_fd(struct btf *btf,
3544                                   const union bpf_attr *attr,
3545                                   union bpf_attr __user *uattr)
3546 {
3547         struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3548         u32 info_len = attr->info.info_len;
3549         int err;
3550
3551         err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3552         if (err)
3553                 return err;
3554
3555         return btf_get_info_by_fd(btf, attr, uattr);
3556 }
3557
3558 static int bpf_link_get_info_by_fd(struct bpf_link *link,
3559                                    const union bpf_attr *attr,
3560                                    union bpf_attr __user *uattr)
3561 {
3562         struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3563         struct bpf_link_info info;
3564         u32 info_len = attr->info.info_len;
3565         int err;
3566
3567         err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3568         if (err)
3569                 return err;
3570         info_len = min_t(u32, sizeof(info), info_len);
3571
3572         memset(&info, 0, sizeof(info));
3573         if (copy_from_user(&info, uinfo, info_len))
3574                 return -EFAULT;
3575
3576         info.type = link->type;
3577         info.id = link->id;
3578         info.prog_id = link->prog->aux->id;
3579
3580         if (link->ops->fill_link_info) {
3581                 err = link->ops->fill_link_info(link, &info);
3582                 if (err)
3583                         return err;
3584         }
3585
3586         if (copy_to_user(uinfo, &info, info_len) ||
3587             put_user(info_len, &uattr->info.info_len))
3588                 return -EFAULT;
3589
3590         return 0;
3591 }
3592
3594 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3595
3596 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3597                                   union bpf_attr __user *uattr)
3598 {
3599         int ufd = attr->info.bpf_fd;
3600         struct fd f;
3601         int err;
3602
3603         if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3604                 return -EINVAL;
3605
3606         f = fdget(ufd);
3607         if (!f.file)
3608                 return -EBADFD;
3609
3610         if (f.file->f_op == &bpf_prog_fops)
3611                 err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
3612                                               uattr);
3613         else if (f.file->f_op == &bpf_map_fops)
3614                 err = bpf_map_get_info_by_fd(f.file->private_data, attr,
3615                                              uattr);
3616         else if (f.file->f_op == &btf_fops)
3617                 err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
3618         else if (f.file->f_op == &bpf_link_fops)
3619                 err = bpf_link_get_info_by_fd(f.file->private_data,
3620                                               attr, uattr);
3621         else
3622                 err = -EINVAL;
3623
3624         fdput(f);
3625         return err;
3626 }
3627
3628 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3629
3630 static int bpf_btf_load(const union bpf_attr *attr)
3631 {
3632         if (CHECK_ATTR(BPF_BTF_LOAD))
3633                 return -EINVAL;
3634
3635         if (!bpf_capable())
3636                 return -EPERM;
3637
3638         return btf_new_fd(attr);
3639 }
3640
3641 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3642
3643 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3644 {
3645         if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3646                 return -EINVAL;
3647
3648         if (!capable(CAP_SYS_ADMIN))
3649                 return -EPERM;
3650
3651         return btf_get_fd_by_id(attr->btf_id);
3652 }
3653
3654 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3655                                   union bpf_attr __user *uattr,
3656                                   u32 prog_id, u32 fd_type,
3657                                   const char *buf, u64 probe_offset,
3658                                   u64 probe_addr)
3659 {
3660         char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3661         u32 len = buf ? strlen(buf) : 0, input_len;
3662         int err = 0;
3663
3664         if (put_user(len, &uattr->task_fd_query.buf_len))
3665                 return -EFAULT;
3666         input_len = attr->task_fd_query.buf_len;
3667         if (input_len && ubuf) {
3668                 if (!len) {
3669                         /* nothing to copy, just NUL-terminate ubuf */
3670                         char zero = '\0';
3671
3672                         if (put_user(zero, ubuf))
3673                                 return -EFAULT;
3674                 } else if (input_len >= len + 1) {
3675                         /* ubuf can hold the string plus its NUL terminator */
3676                         if (copy_to_user(ubuf, buf, len + 1))
3677                                 return -EFAULT;
3678                 } else {
3679                         /* ubuf cannot hold the string plus its NUL:
3680                          * copy what fits, NUL-terminate, flag -ENOSPC.
3681                          */
3682                         char zero = '\0';
3683
3684                         err = -ENOSPC;
3685                         if (copy_to_user(ubuf, buf, input_len - 1))
3686                                 return -EFAULT;
3687                         if (put_user(zero, ubuf + input_len - 1))
3688                                 return -EFAULT;
3689                 }
3690         }
3691
3692         if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3693             put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3694             put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3695             put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3696                 return -EFAULT;
3697
3698         return err;
3699 }
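
     /*
      * Buffer contract of the helper above, e.g. for the tracepoint name
      * "sched_switch" (strlen 12): buf_len is always set to 12; with
      * input_len >= 13 the string and its NUL are copied whole, with a
      * smaller buffer the copy is truncated to input_len - 1 bytes plus
      * a NUL and the command still returns -ENOSPC, telling the caller
      * to retry with at least buf_len + 1 bytes.
      */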
3700
3701 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3702
3703 static int bpf_task_fd_query(const union bpf_attr *attr,
3704                              union bpf_attr __user *uattr)
3705 {
3706         pid_t pid = attr->task_fd_query.pid;
3707         u32 fd = attr->task_fd_query.fd;
3708         const struct perf_event *event;
3709         struct files_struct *files;
3710         struct task_struct *task;
3711         struct file *file;
3712         int err;
3713
3714         if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3715                 return -EINVAL;
3716
3717         if (!capable(CAP_SYS_ADMIN))
3718                 return -EPERM;
3719
3720         if (attr->task_fd_query.flags != 0)
3721                 return -EINVAL;
3722
3723         task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3724         if (!task)
3725                 return -ENOENT;
3726
3727         files = get_files_struct(task);
3728         put_task_struct(task);
3729         if (!files)
3730                 return -ENOENT;
3731
3732         err = 0;
3733         spin_lock(&files->file_lock);
3734         file = fcheck_files(files, fd);
3735         if (!file)
3736                 err = -EBADF;
3737         else
3738                 get_file(file);
3739         spin_unlock(&files->file_lock);
3740         put_files_struct(files);
3741
3742         if (err)
3743                 goto out;
3744
3745         if (file->f_op == &bpf_link_fops) {
3746                 struct bpf_link *link = file->private_data;
3747
3748                 if (link->ops == &bpf_raw_tp_link_lops) {
3749                         struct bpf_raw_tp_link *raw_tp =
3750                                 container_of(link, struct bpf_raw_tp_link, link);
3751                         struct bpf_raw_event_map *btp = raw_tp->btp;
3752
3753                         err = bpf_task_fd_query_copy(attr, uattr,
3754                                                      raw_tp->link.prog->aux->id,
3755                                                      BPF_FD_TYPE_RAW_TRACEPOINT,
3756                                                      btp->tp->name, 0, 0);
3757                         goto put_file;
3758                 }
3759                 goto out_not_supp;
3760         }
3761
3762         event = perf_get_event(file);
3763         if (!IS_ERR(event)) {
3764                 u64 probe_offset, probe_addr;
3765                 u32 prog_id, fd_type;
3766                 const char *buf;
3767
3768                 err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3769                                               &buf, &probe_offset,
3770                                               &probe_addr);
3771                 if (!err)
3772                         err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3773                                                      fd_type, buf,
3774                                                      probe_offset,
3775                                                      probe_addr);
3776                 goto put_file;
3777         }
3778
3779 out_not_supp:
3780         err = -ENOTSUPP;
3781 put_file:
3782         fput(file);
3783 out:
3784         return err;
3785 }
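
     /*
      * Caller sketch (target_pid/target_fd assumed to come from
      * elsewhere; needs CAP_SYS_ADMIN): ask what an fd in another task
      * is attached to.
      *
      *        char buf[256];
      *        union bpf_attr attr = {
      *                .task_fd_query = {
      *                        .pid     = target_pid,
      *                        .fd      = target_fd,
      *                        .buf_len = sizeof(buf),
      *                        .buf     = (__u64)(uintptr_t)buf,
      *                },
      *        };
      *
      *        if (!syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
      *                printf("prog %u, fd_type %u, name %s\n",
      *                       attr.task_fd_query.prog_id,
      *                       attr.task_fd_query.fd_type, buf);
      */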
3786
3787 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3788
3789 #define BPF_DO_BATCH(fn)                        \
3790         do {                                    \
3791                 if (!fn) {                      \
3792                         err = -ENOTSUPP;        \
3793                         goto err_put;           \
3794                 }                               \
3795                 err = fn(map, attr, uattr);     \
3796         } while (0)
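
     /*
      * Note on the macro above: it expands to a goto targeting the
      * err_put label, so it is only usable inside bpf_map_do_batch()
      * below; a NULL op simply means the map type does not implement
      * that batch operation.
      */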
3797
3798 static int bpf_map_do_batch(const union bpf_attr *attr,
3799                             union bpf_attr __user *uattr,
3800                             int cmd)
3801 {
3802         struct bpf_map *map;
3803         int err, ufd;
3804         struct fd f;
3805
3806         if (CHECK_ATTR(BPF_MAP_BATCH))
3807                 return -EINVAL;
3808
3809         ufd = attr->batch.map_fd;
3810         f = fdget(ufd);
3811         map = __bpf_map_get(f);
3812         if (IS_ERR(map))
3813                 return PTR_ERR(map);
3814
3815         if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3816              cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3817             !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3818                 err = -EPERM;
3819                 goto err_put;
3820         }
3821
3822         if (cmd != BPF_MAP_LOOKUP_BATCH &&
3823             !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3824                 err = -EPERM;
3825                 goto err_put;
3826         }
3827
3828         if (cmd == BPF_MAP_LOOKUP_BATCH)
3829                 BPF_DO_BATCH(map->ops->map_lookup_batch);
3830         else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3831                 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3832         else if (cmd == BPF_MAP_UPDATE_BATCH)
3833                 BPF_DO_BATCH(map->ops->map_update_batch);
3834         else
3835                 BPF_DO_BATCH(map->ops->map_delete_batch);
3836
3837 err_put:
3838         fdput(f);
3839         return err;
3840 }
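
     /*
      * Usage sketch (assuming a hash map and caller-provided keys/vals
      * arrays): batched lookup walks the map in chunks; feed out_batch
      * back in as in_batch until the syscall fails with ENOENT.
      *
      *        __u32 batch_tok = 0;
      *        union bpf_attr attr = {
      *                .batch = {
      *                        .map_fd    = map_fd,
      *                        .in_batch  = 0,
      *                        .out_batch = (__u64)(uintptr_t)&batch_tok,
      *                        .keys      = (__u64)(uintptr_t)keys,
      *                        .values    = (__u64)(uintptr_t)vals,
      *                        .count     = 64,
      *                },
      *        };
      *
      *        syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
      *
      * and attr.batch.count is updated to the number of elements copied
      * out; a NULL in_batch means "start from the beginning".
      */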
3841
3842 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3843 {
3844         if (attr->link_create.attach_type == BPF_TRACE_ITER &&
3845             prog->expected_attach_type == BPF_TRACE_ITER)
3846                 return bpf_iter_link_attach(attr, prog);
3847
3848         return -EINVAL;
3849 }
3850
3851 #define BPF_LINK_CREATE_LAST_FIELD link_create.flags
3852 static int link_create(union bpf_attr *attr)
3853 {
3854         enum bpf_prog_type ptype;
3855         struct bpf_prog *prog;
3856         int ret;
3857
3858         if (CHECK_ATTR(BPF_LINK_CREATE))
3859                 return -EINVAL;
3860
3861         ptype = attach_type_to_prog_type(attr->link_create.attach_type);
3862         if (ptype == BPF_PROG_TYPE_UNSPEC)
3863                 return -EINVAL;
3864
3865         prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
3866         if (IS_ERR(prog))
3867                 return PTR_ERR(prog);
3868
3869         ret = bpf_prog_attach_check_attach_type(prog,
3870                                                 attr->link_create.attach_type);
3871         if (ret)
3872                 goto err_out;
3873
3874         switch (ptype) {
3875         case BPF_PROG_TYPE_CGROUP_SKB:
3876         case BPF_PROG_TYPE_CGROUP_SOCK:
3877         case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3878         case BPF_PROG_TYPE_SOCK_OPS:
3879         case BPF_PROG_TYPE_CGROUP_DEVICE:
3880         case BPF_PROG_TYPE_CGROUP_SYSCTL:
3881         case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3882                 ret = cgroup_bpf_link_attach(attr, prog);
3883                 break;
3884         case BPF_PROG_TYPE_TRACING:
3885                 ret = tracing_bpf_link_attach(attr, prog);
3886                 break;
3887         case BPF_PROG_TYPE_FLOW_DISSECTOR:
3888                 ret = netns_bpf_link_create(attr, prog);
3889                 break;
3890         default:
3891                 ret = -EINVAL;
3892         }
3893
3894 err_out:
3895         if (ret < 0)
3896                 bpf_prog_put(prog);
3897         return ret;
3898 }
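
     /*
      * Caller sketch (prog_fd/cgroup_fd assumed; exercises the cgroup
      * branch above):
      *
      *        union bpf_attr attr = {
      *                .link_create = {
      *                        .prog_fd     = prog_fd,
      *                        .target_fd   = cgroup_fd,
      *                        .attach_type = BPF_CGROUP_INET_INGRESS,
      *                },
      *        };
      *        int link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr,
      *                              sizeof(attr));
      *
      * The returned fd owns the attachment: when the last reference to
      * the link goes away, the program is detached.
      */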
3899
3900 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
3901
3902 static int link_update(union bpf_attr *attr)
3903 {
3904         struct bpf_prog *old_prog = NULL, *new_prog;
3905         struct bpf_link *link;
3906         u32 flags;
3907         int ret;
3908
3909         if (CHECK_ATTR(BPF_LINK_UPDATE))
3910                 return -EINVAL;
3911
3912         flags = attr->link_update.flags;
3913         if (flags & ~BPF_F_REPLACE)
3914                 return -EINVAL;
3915
3916         link = bpf_link_get_from_fd(attr->link_update.link_fd);
3917         if (IS_ERR(link))
3918                 return PTR_ERR(link);
3919
3920         new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
3921         if (IS_ERR(new_prog)) {
3922                 ret = PTR_ERR(new_prog);
3923                 goto out_put_link;
3924         }
3925
3926         if (flags & BPF_F_REPLACE) {
3927                 old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
3928                 if (IS_ERR(old_prog)) {
3929                         ret = PTR_ERR(old_prog);
3930                         old_prog = NULL;
3931                         goto out_put_progs;
3932                 }
3933         } else if (attr->link_update.old_prog_fd) {
3934                 ret = -EINVAL;
3935                 goto out_put_progs;
3936         }
3937
3938         if (link->ops->update_prog)
3939                 ret = link->ops->update_prog(link, new_prog, old_prog);
3940         else
3941                 ret = -EINVAL;
3942
3943 out_put_progs:
3944         if (old_prog)
3945                 bpf_prog_put(old_prog);
3946         if (ret)
3947                 bpf_prog_put(new_prog);
3948 out_put_link:
3949         bpf_link_put(link);
3950         return ret;
3951 }
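
     /*
      * Semantics of the function above, spelled out: without
      * BPF_F_REPLACE, old_prog_fd must be zero and the update is
      * unconditional; with BPF_F_REPLACE, ->update_prog() implementations
      * swap in new_prog only while the link still points at old_prog,
      * giving callers compare-and-swap behavior on the attachment.
      */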
3952
3953 static int bpf_link_inc_not_zero(struct bpf_link *link)
3954 {
3955         return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
3956 }
3957
3958 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
3959
3960 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
3961 {
3962         struct bpf_link *link;
3963         u32 id = attr->link_id;
3964         int fd, err;
3965
3966         if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
3967                 return -EINVAL;
3968
3969         if (!capable(CAP_SYS_ADMIN))
3970                 return -EPERM;
3971
3972         spin_lock_bh(&link_idr_lock);
3973         link = idr_find(&link_idr, id);
3974         /* before link is "settled", ID is 0, pretend it doesn't exist yet */
3975         if (link) {
3976                 if (link->id)
3977                         err = bpf_link_inc_not_zero(link);
3978                 else
3979                         err = -EAGAIN;
3980         } else {
3981                 err = -ENOENT;
3982         }
3983         spin_unlock_bh(&link_idr_lock);
3984
3985         if (err)
3986                 return err;
3987
3988         fd = bpf_link_new_fd(link);
3989         if (fd < 0)
3990                 bpf_link_put(link);
3991
3992         return fd;
3993 }
3994
3995 DEFINE_MUTEX(bpf_stats_enabled_mutex);
3996
3997 static int bpf_stats_release(struct inode *inode, struct file *file)
3998 {
3999         mutex_lock(&bpf_stats_enabled_mutex);
4000         static_key_slow_dec(&bpf_stats_enabled_key.key);
4001         mutex_unlock(&bpf_stats_enabled_mutex);
4002         return 0;
4003 }
4004
4005 static const struct file_operations bpf_stats_fops = {
4006         .release = bpf_stats_release,
4007 };
4008
4009 static int bpf_enable_runtime_stats(void)
4010 {
4011         int fd;
4012
4013         mutex_lock(&bpf_stats_enabled_mutex);
4014
4015         /* Set a very high limit to avoid overflow */
4016         if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4017                 mutex_unlock(&bpf_stats_enabled_mutex);
4018                 return -EBUSY;
4019         }
4020
4021         fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4022         if (fd >= 0)
4023                 static_key_slow_inc(&bpf_stats_enabled_key.key);
4024
4025         mutex_unlock(&bpf_stats_enabled_mutex);
4026         return fd;
4027 }
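
     /*
      * Lifetime sketch (hypothetical caller): stats stay enabled for as
      * long as at least one fd returned here is open, and
      * bpf_stats_release() drops the static key again on close.
      *
      *        union bpf_attr attr = { .enable_stats.type = BPF_STATS_RUN_TIME };
      *        int fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
      *        ... read run_time_ns / run_cnt via BPF_OBJ_GET_INFO_BY_FD ...
      *        close(fd);
      */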
4028
4029 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4030
4031 static int bpf_enable_stats(union bpf_attr *attr)
4032 {
4034         if (CHECK_ATTR(BPF_ENABLE_STATS))
4035                 return -EINVAL;
4036
4037         if (!capable(CAP_SYS_ADMIN))
4038                 return -EPERM;
4039
4040         switch (attr->enable_stats.type) {
4041         case BPF_STATS_RUN_TIME:
4042                 return bpf_enable_runtime_stats();
4043         default:
4044                 break;
4045         }
4046         return -EINVAL;
4047 }
4048
4049 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4050
4051 static int bpf_iter_create(union bpf_attr *attr)
4052 {
4053         struct bpf_link *link;
4054         int err;
4055
4056         if (CHECK_ATTR(BPF_ITER_CREATE))
4057                 return -EINVAL;
4058
4059         if (attr->iter_create.flags)
4060                 return -EINVAL;
4061
4062         link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4063         if (IS_ERR(link))
4064                 return PTR_ERR(link);
4065
4066         err = bpf_iter_new_fd(link);
4067         bpf_link_put(link);
4068
4069         return err;
4070 }
4071
4072 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4073 {
4074         union bpf_attr attr;
4075         int err;
4076
4077         if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4078                 return -EPERM;
4079
4080         err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4081         if (err)
4082                 return err;
4083         size = min_t(u32, size, sizeof(attr));
4084
4085         /* copy attributes from user space, may be less than sizeof(bpf_attr) */
4086         memset(&attr, 0, sizeof(attr));
4087         if (copy_from_user(&attr, uattr, size) != 0)
4088                 return -EFAULT;
4089
4090         err = security_bpf(cmd, &attr, size);
4091         if (err < 0)
4092                 return err;
4093
4094         switch (cmd) {
4095         case BPF_MAP_CREATE:
4096                 err = map_create(&attr);
4097                 break;
4098         case BPF_MAP_LOOKUP_ELEM:
4099                 err = map_lookup_elem(&attr);
4100                 break;
4101         case BPF_MAP_UPDATE_ELEM:
4102                 err = map_update_elem(&attr);
4103                 break;
4104         case BPF_MAP_DELETE_ELEM:
4105                 err = map_delete_elem(&attr);
4106                 break;
4107         case BPF_MAP_GET_NEXT_KEY:
4108                 err = map_get_next_key(&attr);
4109                 break;
4110         case BPF_MAP_FREEZE:
4111                 err = map_freeze(&attr);
4112                 break;
4113         case BPF_PROG_LOAD:
4114                 err = bpf_prog_load(&attr, uattr);
4115                 break;
4116         case BPF_OBJ_PIN:
4117                 err = bpf_obj_pin(&attr);
4118                 break;
4119         case BPF_OBJ_GET:
4120                 err = bpf_obj_get(&attr);
4121                 break;
4122         case BPF_PROG_ATTACH:
4123                 err = bpf_prog_attach(&attr);
4124                 break;
4125         case BPF_PROG_DETACH:
4126                 err = bpf_prog_detach(&attr);
4127                 break;
4128         case BPF_PROG_QUERY:
4129                 err = bpf_prog_query(&attr, uattr);
4130                 break;
4131         case BPF_PROG_TEST_RUN:
4132                 err = bpf_prog_test_run(&attr, uattr);
4133                 break;
4134         case BPF_PROG_GET_NEXT_ID:
4135                 err = bpf_obj_get_next_id(&attr, uattr,
4136                                           &prog_idr, &prog_idr_lock);
4137                 break;
4138         case BPF_MAP_GET_NEXT_ID:
4139                 err = bpf_obj_get_next_id(&attr, uattr,
4140                                           &map_idr, &map_idr_lock);
4141                 break;
4142         case BPF_BTF_GET_NEXT_ID:
4143                 err = bpf_obj_get_next_id(&attr, uattr,
4144                                           &btf_idr, &btf_idr_lock);
4145                 break;
4146         case BPF_PROG_GET_FD_BY_ID:
4147                 err = bpf_prog_get_fd_by_id(&attr);
4148                 break;
4149         case BPF_MAP_GET_FD_BY_ID:
4150                 err = bpf_map_get_fd_by_id(&attr);
4151                 break;
4152         case BPF_OBJ_GET_INFO_BY_FD:
4153                 err = bpf_obj_get_info_by_fd(&attr, uattr);
4154                 break;
4155         case BPF_RAW_TRACEPOINT_OPEN:
4156                 err = bpf_raw_tracepoint_open(&attr);
4157                 break;
4158         case BPF_BTF_LOAD:
4159                 err = bpf_btf_load(&attr);
4160                 break;
4161         case BPF_BTF_GET_FD_BY_ID:
4162                 err = bpf_btf_get_fd_by_id(&attr);
4163                 break;
4164         case BPF_TASK_FD_QUERY:
4165                 err = bpf_task_fd_query(&attr, uattr);
4166                 break;
4167         case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4168                 err = map_lookup_and_delete_elem(&attr);
4169                 break;
4170         case BPF_MAP_LOOKUP_BATCH:
4171                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4172                 break;
4173         case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4174                 err = bpf_map_do_batch(&attr, uattr,
4175                                        BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4176                 break;
4177         case BPF_MAP_UPDATE_BATCH:
4178                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4179                 break;
4180         case BPF_MAP_DELETE_BATCH:
4181                 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4182                 break;
4183         case BPF_LINK_CREATE:
4184                 err = link_create(&attr);
4185                 break;
4186         case BPF_LINK_UPDATE:
4187                 err = link_update(&attr);
4188                 break;
4189         case BPF_LINK_GET_FD_BY_ID:
4190                 err = bpf_link_get_fd_by_id(&attr);
4191                 break;
4192         case BPF_LINK_GET_NEXT_ID:
4193                 err = bpf_obj_get_next_id(&attr, uattr,
4194                                           &link_idr, &link_idr_lock);
4195                 break;
4196         case BPF_ENABLE_STATS:
4197                 err = bpf_enable_stats(&attr);
4198                 break;
4199         case BPF_ITER_CREATE:
4200                 err = bpf_iter_create(&attr);
4201                 break;
4202         default:
4203                 err = -EINVAL;
4204                 break;
4205         }
4206
4207         return err;
4208 }
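
     /*
      * User-space view (sketch, assuming raw syscall use without
      * libbpf): every command dispatched above enters through this one
      * multiplexer, so a minimal wrapper is just
      *
      *        static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
      *                                  unsigned int size)
      *        {
      *                return syscall(__NR_bpf, cmd, attr, size);
      *        }
      *
      * Passing a size smaller than sizeof(union bpf_attr) is fine: the
      * tail-zero check only rejects larger attrs whose unknown trailing
      * bytes are non-zero.
      */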