// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)                                    \
        rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
        struct module *module;
        struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
        struct bpf_raw_event_map *btp, *ret = NULL;
        struct bpf_trace_module *btm;
        unsigned int i;

        mutex_lock(&bpf_module_mutex);
        list_for_each_entry(btm, &bpf_trace_modules, list) {
                for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
                        btp = &btm->module->bpf_raw_events[i];
                        if (!strcmp(btp->tp->name, name)) {
                                if (try_module_get(btm->module))
                                        ret = btp;
                                goto out;
                        }
                }
        }
out:
        mutex_unlock(&bpf_module_mutex);
        return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
        return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
                                  u64 flags, const struct btf **btf,
                                  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        unsigned int ret;

        cant_sleep();

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /*
                 * Since a BPF program is already running on this CPU, don't
                 * call into another BPF program (same or different), and
                 * don't send the kprobe event into the ring buffer; just
                 * return zero here.
                 */
                ret = 0;
                goto out;
        }

        /*
         * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
         * into all call sites, those sites first call bpf_prog_array_valid()
         * to check whether call->prog_array is empty or not, as a heuristic
         * to speed up execution.
         *
         * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
         * we get here and do the proper rcu_dereference() under the RCU
         * lock; if the array turns out to be NULL after all, we bail out.
         * Conversely, if the fetched pointer was NULL, the prog_array is
         * skipped entirely, with the accepted risk of missing events that
         * were added between that check and the rcu_dereference() here.
         */
        ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
        __this_cpu_dec(bpf_prog_active);

        return ret;
}

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
        regs_set_return_value(regs, rc);
        override_function_with_return(regs);
        return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
        .func           = bpf_override_return,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};
#endif
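
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * a libbpf-style kprobe program using bpf_override_return() to force an
 * error return from the probed function. This only works with
 * CONFIG_BPF_KPROBE_OVERRIDE and on functions annotated with
 * ALLOW_ERROR_INJECTION; should_failslab() is assumed here as a commonly
 * injectable target.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/should_failslab")
int BPF_KPROBE(fail_slab)
{
        /* Make the probed function return -ENOMEM (-12) to its caller. */
        bpf_override_return(ctx, -12);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif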

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
        int ret;

        ret = copy_from_user_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
           const void __user *, unsafe_ptr)
{
        return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
        .func           = bpf_probe_read_user,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
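
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * bpf_probe_read_user() as called from BPF program code, copying a
 * struct sockaddr that a task passed to connect(). The target function
 * __sys_connect() and program name are assumptions; note dst is zeroed
 * by the helper on failure.
 */
#if 0
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>

SEC("kprobe/__sys_connect")
int BPF_KPROBE(snoop_connect, int fd, void *uservaddr, int addrlen)
{
        struct sockaddr_in sa = {};

        /* uservaddr is an unsafe user pointer; sa stays zeroed on failure. */
        if (bpf_probe_read_user(&sa, sizeof(sa), uservaddr))
                return 0;
        if (sa.sin_family == 2 /* AF_INET */)
                bpf_printk("connect to port %d", bpf_ntohs(sa.sin_port));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif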

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
                               const void __user *unsafe_ptr)
{
        int ret;

        /*
         * NB: We rely on strncpy_from_user() not copying junk past the NUL
         * terminator into `dst`.
         *
         * strncpy_from_user() does long-sized strides in the fast path. If
         * the strncpy does not mask out the bytes after the NUL in
         * `unsafe_ptr`, then there could be junk after the NUL in `dst`. If
         * the user takes `dst` and keys a hash map with it, then semantically
         * identical strings can occupy multiple entries in the map.
         */
        ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
           const void __user *, unsafe_ptr)
{
        return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
        .func           = bpf_probe_read_user_str,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
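
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * why the no-junk-past-NUL guarantee above matters. The string read by
 * bpf_probe_read_user_str() is used directly as a hash map key, so any
 * stale bytes after the NUL would make identical pathnames land in
 * different map entries. do_sys_openat2() and the map layout are
 * illustrative choices; assumes libbpf.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct open_key {
        char name[64];
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1024);
        __type(key, struct open_key);
        __type(value, __u64);
} open_count SEC(".maps");

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(count_opens, int dfd, const char *filename)
{
        struct open_key key = {};       /* zeroed, so bytes past the NUL match */
        __u64 one = 1, *cnt;

        if (bpf_probe_read_user_str(key.name, sizeof(key.name), filename) < 0)
                return 0;

        cnt = bpf_map_lookup_elem(&open_count, &key);
        if (cnt)
                __sync_fetch_and_add(cnt, 1);
        else
                bpf_map_update_elem(&open_count, &key, &one, BPF_ANY);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif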

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
        int ret;

        ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
        .func           = bpf_probe_read_kernel,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
        int ret;

        /*
         * The strncpy_from_kernel_nofault() call will likely not fill the
         * entire buffer, but that's okay in this circumstance: we're probing
         * arbitrary memory anyway, just like bpf_probe_read_*() does, and
         * might as well be probing the stack. Thus, memory is explicitly
         * cleared only in the error case, so that improper users who ignore
         * the return code altogether don't copy garbage; otherwise the
         * length of the string is returned, which can be used for
         * bpf_perf_event_output() et al.
         */
        ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
        .func           = bpf_probe_read_kernel_str,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
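
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * the two-step pattern BPF programs typically use with these kernel-read
 * helpers: bpf_probe_read_kernel() to fetch a pointer out of a kernel
 * structure, then bpf_probe_read_kernel_str() to copy the string it
 * points to. Assumes libbpf and a vmlinux.h providing struct filename.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(trace_unlink, int dfd, struct filename *name)
{
        const char *pathname;
        char buf[64];

        if (bpf_probe_read_kernel(&pathname, sizeof(pathname), &name->name))
                return 0;
        /* Returns the string length on success; buf is zeroed on failure. */
        if (bpf_probe_read_kernel_str(buf, sizeof(buf), pathname) < 0)
                return 0;
        bpf_printk("unlink %s", buf);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif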

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        if ((unsigned long)unsafe_ptr < TASK_SIZE) {
                return bpf_probe_read_user_common(dst, size,
                                (__force void __user *)unsafe_ptr);
        }
        return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
        .func           = bpf_probe_read_compat,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        if ((unsigned long)unsafe_ptr < TASK_SIZE) {
                return bpf_probe_read_user_str_common(dst, size,
                                (__force void __user *)unsafe_ptr);
        }
        return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
        .func           = bpf_probe_read_compat_str,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
           u32, size)
{
        /*
         * Ensure we're in user context which is safe for the helper to
         * run. This helper has no business in a kthread.
         *
         * access_ok() should prevent writing to non-user memory, but in
         * some situations (nommu, temporary switch, etc) access_ok() does
         * not provide enough validation, hence the check on KERNEL_DS.
         *
         * nmi_uaccess_okay() ensures the probe is not run in an interim
         * state, when the task or mm are switched. This is specifically
         * required to prevent the use of temporary mm.
         */

        if (unlikely(in_interrupt() ||
                     current->flags & (PF_KTHREAD | PF_EXITING)))
                return -EPERM;
        if (unlikely(uaccess_kernel()))
                return -EPERM;
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;

        return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
        .func           = bpf_probe_write_user,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
        if (!capable(CAP_SYS_ADMIN))
                return NULL;

        pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
                            current->comm, task_pid_nr(current));

        return &bpf_probe_write_user_proto;
}
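
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * what a (CAP_SYS_ADMIN-only, warning-emitting) user of
 * bpf_probe_write_user() looks like: rewriting a pathname in the calling
 * task's own memory before the kernel copies it in. For experimentation
 * only; the target function and paths are assumptions.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(redirect_open, int dfd, const char *filename)
{
        const char want[] = "/tmp/a", repl[] = "/tmp/b";
        char cur[sizeof(want)];
        int i;

        if (bpf_probe_read_user(cur, sizeof(cur), filename))
                return 0;
        for (i = 0; i < sizeof(want); i++)
                if (cur[i] != want[i])
                        return 0;
        /* Fails with -EPERM from kthreads, irq context, or KERNEL_DS. */
        bpf_probe_write_user((void *)filename, repl, sizeof(repl));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif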

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS        3
#define BPF_TRACE_PRINTK_SIZE           1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
           u64, arg2, u64, arg3)
{
        u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
        u32 *bin_args;
        static char buf[BPF_TRACE_PRINTK_SIZE];
        unsigned long flags;
        int ret;

        ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
                                  MAX_TRACE_PRINTK_VARARGS);
        if (ret < 0)
                return ret;

        raw_spin_lock_irqsave(&trace_printk_lock, flags);
        ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

        trace_bpf_trace_printk(buf);
        raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

        bpf_bprintf_cleanup();

        return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
        .func           = bpf_trace_printk,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
        /*
         * This program might be calling bpf_trace_printk,
         * so enable the associated bpf_trace/bpf_trace_printk event.
         * Repeat this each time, as it is possible a user has
         * disabled bpf_trace_printk events. By loading a program that
         * calls bpf_trace_printk(), the user has expressed the intent
         * to see such events.
         */
        if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
                pr_warn_ratelimited("could not enable bpf_trace_printk events");

        return &bpf_trace_printk_proto;
}
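
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * the common consumer of this helper is libbpf's bpf_printk() macro,
 * which packs the format string and up to three u64 args into a
 * bpf_trace_printk() call. Output lands in the bpf_trace_printk trace
 * event enabled above, readable via tracefs trace_pipe.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_exec(void *ctx)
{
        /* The tgid lives in the upper 32 bits of bpf_get_current_pid_tgid(). */
        bpf_printk("execve by pid %d", (int)(bpf_get_current_pid_tgid() >> 32));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif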

#define MAX_SEQ_PRINTF_VARARGS          12

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
           const void *, data, u32, data_len)
{
        int err, num_args;
        u32 *bin_args;

        if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
            (data_len && !data))
                return -EINVAL;
        num_args = data_len / 8;

        err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
        if (err < 0)
                return err;

        seq_bprintf(m, fmt, bin_args);

        bpf_bprintf_cleanup();

        return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
        .func           = bpf_seq_printf,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &btf_seq_file_ids[0],
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE,
        .arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
        return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
        .func           = bpf_seq_write,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &btf_seq_file_ids[0],
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
};
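
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * the seq helpers are only offered to BPF_TRACE_ITER programs (see
 * tracing_prog_func_proto() further down). A task iterator printing one
 * line per task might look like this; assumes libbpf, a vmlinux.h, and
 * the BPF_SEQ_PRINTF convenience macro from bpf_tracing.h.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
        struct seq_file *seq = ctx->meta->seq;
        struct task_struct *task = ctx->task;

        if (!task)
                return 0;
        /* Expands to a bpf_seq_printf() call with packed varargs. */
        BPF_SEQ_PRINTF(seq, "%8d %16s\n", task->pid, task->comm);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif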

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
           u32, btf_ptr_size, u64, flags)
{
        const struct btf *btf;
        s32 btf_id;
        int ret;

        ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
        if (ret)
                return ret;

        return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
        .func           = bpf_seq_printf_btf,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &btf_seq_file_ids[0],
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
                     u64 *value, u64 *enabled, u64 *running)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
        u64 value = 0;
        int err;

        err = get_map_perf_counter(map, flags, &value, NULL, NULL);
        /*
         * This API is ugly since valid counter values in the [-22..-2]
         * range are indistinguishable from errors, but that's UAPI.
         */
        if (err)
                return err;
        return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .func           = bpf_perf_event_read,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
                                   &buf->running);
        if (unlikely(err))
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .func           = bpf_perf_event_read_value,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg4_type      = ARG_CONST_SIZE,
};
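
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * reading a perf counter from BPF. User space is expected to create the
 * events with perf_event_open() and store their fds in the
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY; the program then samples this CPU's
 * counter. Assumes libbpf (which, to my understanding, sizes a
 * zero-max_entries perf event array to the number of CPUs).
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(max_entries, 0);
        __type(key, int);
        __type(value, __u32);
} counters SEC(".maps");

SEC("kprobe/do_sys_openat2")
int sample_counter(void *ctx)
{
        struct bpf_perf_event_value v = {};

        /* v is zeroed by the helper on any error path. */
        if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
                                       &v, sizeof(v)))
                bpf_printk("counter %llu enabled %llu", v.counter, v.enabled);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif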

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
                        u64 flags, struct perf_sample_data *sd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;
        struct perf_event *event;

        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        event = ee->event;
        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;

        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;

        return perf_event_output(event, sd, regs);
}
/*
 * Tracepoints may be executed in normal, irq, and nmi context, and each
 * of these may in turn call bpf_perf_event_output(); support that
 * nesting by keeping one perf_sample_data per context level.
 */
struct bpf_trace_sample_data {
        struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);

BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
        int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
                        .data = data,
                },
        };
        struct perf_sample_data *sd;
        int err;

        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
                err = -EBUSY;
                goto out;
        }

        sd = &sds->sds[nest_level - 1];

        if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
                err = -EINVAL;
                goto out;
        }

        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;

        err = __bpf_perf_event_output(regs, map, flags, sd);

out:
        this_cpu_dec(bpf_trace_nest_level);
        return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .func           = bpf_perf_event_output,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};
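
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * the canonical producer side of bpf_perf_event_output(), pushing a small
 * event record to user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY.
 * BPF_F_CURRENT_CPU maps to the index handling above. Assumes libbpf;
 * user space would consume this via perf_buffer__new().
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct event {
        __u32 pid;
        char comm[16];
};

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __type(key, int);
        __type(value, __u32);
} events SEC(".maps");

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(emit_open)
{
        struct event e = {};

        e.pid = bpf_get_current_pid_tgid() >> 32;
        bpf_get_current_comm(e.comm, sizeof(e.comm));
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif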

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
        struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
        int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
        struct perf_raw_frag frag = {
                .copy           = ctx_copy,
                .size           = ctx_size,
                .data           = ctx,
        };
        struct perf_raw_record raw = {
                .frag = {
                        {
                                .next   = ctx_size ? &frag : NULL,
                        },
                        .size   = meta_size,
                        .data   = meta,
                },
        };
        struct perf_sample_data *sd;
        struct pt_regs *regs;
        u64 ret;

        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
                ret = -EBUSY;
                goto out;
        }
        sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
        regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

        perf_fetch_caller_regs(regs);
        perf_sample_data_init(sd, 0, 0);
        sd->raw = &raw;

        ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
        this_cpu_dec(bpf_event_output_nest_level);
        return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
        return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
        .func           = bpf_get_current_task,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
        return (unsigned long) current;
}

BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)

static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
        .func           = bpf_get_current_task_btf,
        .gpl_only       = true,
        .ret_type       = RET_PTR_TO_BTF_ID,
        .ret_btf_id     = &bpf_get_current_btf_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct cgroup *cgrp;

        if (unlikely(idx >= array->map.max_entries))
                return -E2BIG;

        cgrp = READ_ONCE(array->ptrs[idx]);
        if (unlikely(!cgrp))
                return -EAGAIN;

        return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
        .func           = bpf_current_task_under_cgroup,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};

struct send_signal_irq_work {
        struct irq_work irq_work;
        struct task_struct *task;
        u32 sig;
        enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
        struct send_signal_irq_work *work;

        work = container_of(entry, struct send_signal_irq_work, irq_work);
        group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}
static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
        struct send_signal_irq_work *work = NULL;

        /* Similar to bpf_probe_write_user(): the task needs to be in a
         * sound condition, and kernel memory access must be permitted,
         * in order to send a signal to the current task.
         */
        if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
                return -EPERM;
        if (unlikely(uaccess_kernel()))
                return -EPERM;
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;

        if (irqs_disabled()) {
                /* Do an early check on signal validity. Otherwise,
                 * the error is lost in the deferred irq_work.
                 */
                if (unlikely(!valid_signal(sig)))
                        return -EINVAL;

                work = this_cpu_ptr(&send_signal_work);
                if (irq_work_is_busy(&work->irq_work))
                        return -EBUSY;

                /* Add the current task, which is the target of the signal,
                 * to the irq_work. The current task may change by the time
                 * the queued irq works get executed.
                 */
                work->task = current;
                work->sig = sig;
                work->type = type;
                irq_work_queue(&work->irq_work);
                return 0;
        }

        return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
        return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
        .func           = bpf_send_signal,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
        return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
        .func           = bpf_send_signal_thread,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
};
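
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * the two wrappers above differ only in pid type -- bpf_send_signal()
 * targets the whole thread group (PIDTYPE_TGID), bpf_send_signal_thread()
 * only the current thread (PIDTYPE_PID). A toy policy killing any task
 * that enters ptrace() could look like this.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_ptrace")
int deny_ptrace(void *ctx)
{
        /* 9 == SIGKILL; validity is checked before any irq_work deferral. */
        bpf_send_signal(9);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif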

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
        long len;
        char *p;

        if (!sz)
                return 0;

        p = d_path(path, buf, sz);
        if (IS_ERR(p)) {
                len = PTR_ERR(p);
        } else {
                len = buf + sz - p;
                memmove(buf, p, len);
        }

        return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_TRACING &&
            prog->expected_attach_type == BPF_TRACE_ITER)
                return true;

        if (prog->type == BPF_PROG_TYPE_LSM)
                return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

        return btf_id_set_contains(&btf_allowlist_d_path,
                                   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
        .func           = bpf_d_path,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &bpf_d_path_btf_ids[0],
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .allowed        = bpf_d_path_allowed,
};
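
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * bpf_d_path() from one of the allowlisted attach points above.
 * vfs_truncate() is in btf_allowlist_d_path, so an fentry program on it
 * passes bpf_d_path_allowed(). Assumes libbpf and a vmlinux.h.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/vfs_truncate")
int BPF_PROG(trace_truncate, const struct path *path, loff_t length)
{
        char buf[64];
        long n;

        /* Returns the path length, or a negative error from d_path(). */
        n = bpf_d_path((struct path *)path, buf, sizeof(buf));
        if (n > 0)
                bpf_printk("truncate %s to %lld", buf, length);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif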

#define BTF_F_ALL       (BTF_F_COMPACT  | BTF_F_NONAME | \
                         BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
                                  u64 flags, const struct btf **btf,
                                  s32 *btf_id)
{
        const struct btf_type *t;

        if (unlikely(flags & ~(BTF_F_ALL)))
                return -EINVAL;

        if (btf_ptr_size != sizeof(struct btf_ptr))
                return -EINVAL;

        *btf = bpf_get_btf_vmlinux();

        if (IS_ERR_OR_NULL(*btf))
                return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

        if (ptr->type_id > 0)
                *btf_id = ptr->type_id;
        else
                return -EINVAL;

        if (*btf_id > 0)
                t = btf_type_by_id(*btf, *btf_id);
        if (*btf_id <= 0 || !t)
                return -ENOENT;

        return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
           u32, btf_ptr_size, u64, flags)
{
        const struct btf *btf;
        s32 btf_id;
        int ret;

        ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
        if (ret)
                return ret;

        return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
                                      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
        .func           = bpf_snprintf_btf,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
        .arg3_type      = ARG_PTR_TO_MEM,
        .arg4_type      = ARG_CONST_SIZE,
        .arg5_type      = ARG_ANYTHING,
};
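
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * bpf_snprintf_btf() renders a kernel object as text from its BTF type.
 * The struct btf_ptr carries the object pointer plus its vmlinux BTF type
 * id, which CO-RE can resolve at load time via bpf_core_type_id_kernel()
 * from bpf_core_read.h. The target function is chosen arbitrarily.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

static char out[2048];  /* global scratch: too large for the BPF stack */

SEC("kprobe/do_nanosleep")
int dump_current(void *ctx)
{
        struct btf_ptr ptr = {
                .ptr = bpf_get_current_task_btf(),
                .type_id = bpf_core_type_id_kernel(struct task_struct),
        };

        /* On success, out holds a textual dump of current's task_struct. */
        bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr), BTF_F_COMPACT);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif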

const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_map_push_elem:
                return &bpf_map_push_elem_proto;
        case BPF_FUNC_map_pop_elem:
                return &bpf_map_pop_elem_proto;
        case BPF_FUNC_map_peek_elem:
                return &bpf_map_peek_elem_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_ktime_get_boot_ns:
                return &bpf_ktime_get_boot_ns_proto;
        case BPF_FUNC_ktime_get_coarse_ns:
                return &bpf_ktime_get_coarse_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_current_pid_tgid:
                return &bpf_get_current_pid_tgid_proto;
        case BPF_FUNC_get_current_task:
                return &bpf_get_current_task_proto;
        case BPF_FUNC_get_current_task_btf:
                return &bpf_get_current_task_btf_proto;
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_current_comm:
                return &bpf_get_current_comm_proto;
        case BPF_FUNC_trace_printk:
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_get_numa_node_id:
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
        case BPF_FUNC_probe_write_user:
                return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
#endif
        case BPF_FUNC_send_signal:
                return &bpf_send_signal_proto;
        case BPF_FUNC_send_signal_thread:
                return &bpf_send_signal_thread_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
        case BPF_FUNC_get_ns_current_pid_tgid:
                return &bpf_get_ns_current_pid_tgid_proto;
        case BPF_FUNC_ringbuf_output:
                return &bpf_ringbuf_output_proto;
        case BPF_FUNC_ringbuf_reserve:
                return &bpf_ringbuf_reserve_proto;
        case BPF_FUNC_ringbuf_submit:
                return &bpf_ringbuf_submit_proto;
        case BPF_FUNC_ringbuf_discard:
                return &bpf_ringbuf_discard_proto;
        case BPF_FUNC_ringbuf_query:
                return &bpf_ringbuf_query_proto;
        case BPF_FUNC_jiffies64:
                return &bpf_jiffies64_proto;
        case BPF_FUNC_get_task_stack:
                return &bpf_get_task_stack_proto;
        case BPF_FUNC_copy_from_user:
                return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_per_cpu_ptr:
                return &bpf_per_cpu_ptr_proto;
        case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        case BPF_FUNC_task_storage_get:
                return &bpf_task_storage_get_proto;
        case BPF_FUNC_task_storage_delete:
                return &bpf_task_storage_delete_proto;
        case BPF_FUNC_for_each_map_elem:
                return &bpf_for_each_map_elem_proto;
        case BPF_FUNC_snprintf:
                return &bpf_snprintf_proto;
        default:
                return NULL;
        }
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
        case BPF_FUNC_override_return:
                return &bpf_override_return_proto;
#endif
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                        const struct bpf_prog *prog,
                                        struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(struct pt_regs))
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        /*
         * Assertion for 32 bit to make sure last 8 byte access
         * (BPF_DW) to the last 4 byte member is disallowed.
         */
        if (off + size > sizeof(struct pt_regs))
                return false;

        return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
        .get_func_proto  = kprobe_prog_func_proto,
        .is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * r1 points to perf tracepoint buffer where first 8 bytes are hidden
         * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
         * from there and call the same bpf_perf_event_output() helper inline.
         */
        return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .func           = bpf_perf_event_output_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * Same comment as in bpf_perf_event_output_tp(), only that this time
         * the other helper's function body cannot be inlined due to being
         * external, thus we need to call the raw helper function directly.
         */
        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                               flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
        .func           = bpf_get_stackid_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
           u64, flags)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
                             (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
        .func           = bpf_get_stack_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};
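
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * a tracepoint program using the _tp stack helpers above. The context is
 * the perf tracepoint buffer from which the helpers recover pt_regs as
 * described; the program itself just passes ctx through. Flags 0 means a
 * kernel stack trace. Assumes libbpf.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, 1024);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, 127 * sizeof(__u64));
} stacks SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int on_switch(void *ctx)
{
        long id = bpf_get_stackid(ctx, &stacks, 0);

        if (id >= 0)
                bpf_printk("kernel stack id %ld", id);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif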

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_tp;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto_tp;
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    const struct bpf_prog *prog,
                                    struct bpf_insn_access_aux *info)
{
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
        return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
        .get_func_proto  = tp_prog_func_proto,
        .is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
                                    &buf->running);
        if (unlikely(err))
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
        .func           = bpf_perf_prog_read_value,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
           void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
        return -ENOENT;
#else
        static const u32 br_entry_size = sizeof(struct perf_branch_entry);
        struct perf_branch_stack *br_stack = ctx->data->br_stack;
        u32 to_copy;

        if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
                return -EINVAL;

        if (unlikely(!br_stack))
                return -EINVAL;

        if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
                return br_stack->nr * br_entry_size;

        if (!buf || (size % br_entry_size != 0))
                return -EINVAL;

        to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
        memcpy(buf, br_stack->entries, to_copy);

        return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
        .func           = bpf_read_branch_records,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};
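
/*
 * Usage sketch (illustrative, not part of this file, hence under #if 0):
 * copying branch records from a perf_event program, per the two modes
 * above (a size query via BPF_F_GET_BRANCH_RECORDS_SIZE, or the copy
 * itself with a buffer sized in whole perf_branch_entry units). Requires
 * an LBR-capable x86 CPU and a perf event opened with branch sampling;
 * assumes libbpf.
 */
#if 0
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
        struct perf_branch_entry entries[16] = {};
        long n;

        n = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
        if (n > 0)
                /* n is the number of bytes copied, a multiple of the entry size. */
                bpf_printk("%ld branch records", n / sizeof(entries[0]));
        return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif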

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_pe;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto_pe;
        case BPF_FUNC_perf_prog_read_value:
                return &bpf_perf_prog_read_value_proto;
        case BPF_FUNC_read_branch_records:
                return &bpf_read_branch_records_proto;
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
        struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
        struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
        int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

        if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
                this_cpu_dec(bpf_raw_tp_nest_level);
                return ERR_PTR(-EBUSY);
        }

        return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
        this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        ret = ____bpf_perf_event_output(regs, map, flags, data, size);

        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
        .func           = bpf_perf_event_output_raw_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
};

extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
           struct bpf_map *, map, u64, flags)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
        ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                              flags, 0, 0);
        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
        .func           = bpf_get_stackid_raw_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
           void *, buf, u32, size, u64, flags)
{
        struct pt_regs *regs = get_bpf_raw_tp_regs();
        int ret;

        if (IS_ERR(regs))
                return PTR_ERR(regs);

        perf_fetch_caller_regs(regs);
        ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
                            (unsigned long) size, flags, 0);
        put_bpf_raw_tp_regs();
        return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
        .func           = bpf_get_stack_raw_tp,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_raw_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_raw_tp;
        case BPF_FUNC_get_stack:
                return &bpf_get_stack_proto_raw_tp;
        default:
                return bpf_tracing_func_proto(func_id, prog);
        }
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
#ifdef CONFIG_NET
        case BPF_FUNC_skb_output:
                return &bpf_skb_output_proto;
        case BPF_FUNC_xdp_output:
                return &bpf_xdp_output_proto;
        case BPF_FUNC_skc_to_tcp6_sock:
                return &bpf_skc_to_tcp6_sock_proto;
        case BPF_FUNC_skc_to_tcp_sock:
                return &bpf_skc_to_tcp_sock_proto;
        case BPF_FUNC_skc_to_tcp_timewait_sock:
                return &bpf_skc_to_tcp_timewait_sock_proto;
        case BPF_FUNC_skc_to_tcp_request_sock:
                return &bpf_skc_to_tcp_request_sock_proto;
        case BPF_FUNC_skc_to_udp6_sock:
                return &bpf_skc_to_udp6_sock_proto;
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_tracing_proto;
        case BPF_FUNC_sk_storage_delete:
                return &bpf_sk_storage_delete_tracing_proto;
        case BPF_FUNC_sock_from_file:
                return &bpf_sock_from_file_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_ptr_cookie_proto;
#endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_printf_proto :
                       NULL;
        case BPF_FUNC_seq_write:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_write_proto :
                       NULL;
        case BPF_FUNC_seq_printf_btf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
                       &bpf_seq_printf_btf_proto :
                       NULL;
        case BPF_FUNC_d_path:
                return &bpf_d_path_proto;
        default:
                return raw_tp_prog_func_proto(func_id, prog);
        }
}

static bool raw_tp_prog_is_valid_access(int off, int size,
                                        enum bpf_access_type type,
                                        const struct bpf_prog *prog,
                                        struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
                                         enum bpf_access_type type,
                                         const struct bpf_prog *prog,
                                         struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
        .get_func_proto  = raw_tp_prog_func_proto,
        .is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
        .test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
        .get_func_proto  = tracing_prog_func_proto,
        .is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
        .test_run = bpf_prog_test_run_tracing,
};
1529
1530 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1531                                                  enum bpf_access_type type,
1532                                                  const struct bpf_prog *prog,
1533                                                  struct bpf_insn_access_aux *info)
1534 {
1535         if (off == 0) {
1536                 if (size != sizeof(u64) || type != BPF_READ)
1537                         return false;
1538                 info->reg_type = PTR_TO_TP_BUFFER;
1539         }
1540         return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1541 }
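
/*
 * Illustrative sketch: for writable raw tracepoints the u64 load at
 * context offset 0 is typed PTR_TO_TP_BUFFER, and stores through the
 * resulting pointer are bounded by btp->writable_size.  Assuming the
 * in-tree bpf_test_finish writable tracepoint and libbpf's section
 * naming (both assumptions, not guarantees):
 *
 *	SEC("raw_tracepoint.w/bpf_test_finish")
 *	int poke_retval(u64 *ctx)
 *	{
 *		// offset-0 load, typed PTR_TO_TP_BUFFER by the hook above
 *		int *err = (int *)ctx[0];
 *
 *		*err = 1;
 *		return 0;
 *	}
 */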
1542
1543 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1544         .get_func_proto  = raw_tp_prog_func_proto,
1545         .is_valid_access = raw_tp_writable_prog_is_valid_access,
1546 };
1547
1548 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1549 };
1550
1551 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1552                                     const struct bpf_prog *prog,
1553                                     struct bpf_insn_access_aux *info)
1554 {
1555         const int size_u64 = sizeof(u64);
1556
1557         if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1558                 return false;
1559         if (type != BPF_READ)
1560                 return false;
1561         if (off % size != 0) {
1562                 if (sizeof(unsigned long) != 4)
1563                         return false;
1564                 if (size != 8)
1565                         return false;
1566                 if (off % size != 4)
1567                         return false;
1568         }
1569
1570         switch (off) {
1571         case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1572                 bpf_ctx_record_field_size(info, size_u64);
1573                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1574                         return false;
1575                 break;
1576         case bpf_ctx_range(struct bpf_perf_event_data, addr):
1577                 bpf_ctx_record_field_size(info, size_u64);
1578                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1579                         return false;
1580                 break;
1581         default:
1582                 if (size != sizeof(long))
1583                         return false;
1584         }
1585
1586         return true;
1587 }
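
/*
 * Illustrative sketch: the rules above allow narrow (e.g. 4-byte)
 * reads of sample_period and addr, while every other context offset
 * must be read at sizeof(long) because it lands in the pt_regs area.
 * A perf_event program obeying both (assumes libbpf's PT_REGS_IP()
 * from bpf_tracing.h):
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;	// narrow access ok
 *		long ip = PT_REGS_IP(&ctx->regs);	// sizeof(long) read
 *
 *		bpf_printk("ip %lx period %llu", ip, period);
 *		return 0;
 *	}
 */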
1588
1589 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1590                                       const struct bpf_insn *si,
1591                                       struct bpf_insn *insn_buf,
1592                                       struct bpf_prog *prog, u32 *target_size)
1593 {
1594         struct bpf_insn *insn = insn_buf;
1595
1596         switch (si->off) {
1597         case offsetof(struct bpf_perf_event_data, sample_period):
1598                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1599                                                        data), si->dst_reg, si->src_reg,
1600                                       offsetof(struct bpf_perf_event_data_kern, data));
1601                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1602                                       bpf_target_off(struct perf_sample_data, period, 8,
1603                                                      target_size));
1604                 break;
1605         case offsetof(struct bpf_perf_event_data, addr):
1606                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1607                                                        data), si->dst_reg, si->src_reg,
1608                                       offsetof(struct bpf_perf_event_data_kern, data));
1609                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1610                                       bpf_target_off(struct perf_sample_data, addr, 8,
1611                                                      target_size));
1612                 break;
1613         default:
1614                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1615                                                        regs), si->dst_reg, si->src_reg,
1616                                       offsetof(struct bpf_perf_event_data_kern, regs));
1617                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1618                                       si->off);
1619                 break;
1620         }
1621
1622         return insn - insn_buf;
1623 }
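
/*
 * To make the rewrite above concrete: a program access such as
 *
 *	__u64 p = ctx->sample_period;
 *
 * is converted into two loads against the in-kernel layout, roughly
 *
 *	rX = *(u64 *)(ctx + offsetof(struct bpf_perf_event_data_kern, data));
 *	rX = *(u64 *)(rX + offsetof(struct perf_sample_data, period));
 *
 * while any other offset is funneled through the regs pointer:
 *
 *	rX = *(u64 *)(ctx + offsetof(struct bpf_perf_event_data_kern, regs));
 *	rX = *(long *)(rX + off);
 */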
1624
1625 const struct bpf_verifier_ops perf_event_verifier_ops = {
1626         .get_func_proto         = pe_prog_func_proto,
1627         .is_valid_access        = pe_prog_is_valid_access,
1628         .convert_ctx_access     = pe_prog_convert_ctx_access,
1629 };
1630
1631 const struct bpf_prog_ops perf_event_prog_ops = {
1632 };
1633
1634 static DEFINE_MUTEX(bpf_event_mutex);
1635
1636 #define BPF_TRACE_MAX_PROGS 64
1637
1638 int perf_event_attach_bpf_prog(struct perf_event *event,
1639                                struct bpf_prog *prog)
1640 {
1641         struct bpf_prog_array *old_array;
1642         struct bpf_prog_array *new_array;
1643         int ret = -EEXIST;
1644
1645         /*
1646          * Kprobe override only works if the probe is at function entry,
1647          * and only if the function is on the error-injection opt-in list.
1648          */
1649         if (prog->kprobe_override &&
1650             (!trace_kprobe_on_func_entry(event->tp_event) ||
1651              !trace_kprobe_error_injectable(event->tp_event)))
1652                 return -EINVAL;
1653
1654         mutex_lock(&bpf_event_mutex);
1655
1656         if (event->prog)
1657                 goto unlock;
1658
1659         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1660         if (old_array &&
1661             bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1662                 ret = -E2BIG;
1663                 goto unlock;
1664         }
1665
1666         ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1667         if (ret < 0)
1668                 goto unlock;
1669
1670         /* set event->prog and publish the new array on event->tp_event */
1671         event->prog = prog;
1672         rcu_assign_pointer(event->tp_event->prog_array, new_array);
1673         bpf_prog_array_free(old_array);
1674
1675 unlock:
1676         mutex_unlock(&bpf_event_mutex);
1677         return ret;
1678 }
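
/*
 * Userspace reaches this via ioctl(PERF_EVENT_IOC_SET_BPF) on a
 * kprobe/tracepoint perf event.  Illustrative sketch (tp_id and
 * prog_fd are placeholders; error handling elided):
 *
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_TRACEPOINT,
 *		.size	= sizeof(attr),
 *		.config	= tp_id,	// from tracefs events/.../id
 *	};
 *	int efd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
 */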
1679
1680 void perf_event_detach_bpf_prog(struct perf_event *event)
1681 {
1682         struct bpf_prog_array *old_array;
1683         struct bpf_prog_array *new_array;
1684         int ret;
1685
1686         mutex_lock(&bpf_event_mutex);
1687
1688         if (!event->prog)
1689                 goto unlock;
1690
1691         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1692         ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1693         if (ret == -ENOENT)
1694                 goto unlock;
1695         if (ret < 0) {
1696                 bpf_prog_array_delete_safe(old_array, event->prog);
1697         } else {
1698                 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1699                 bpf_prog_array_free(old_array);
1700         }
1701
1702         bpf_prog_put(event->prog);
1703         event->prog = NULL;
1704
1705 unlock:
1706         mutex_unlock(&bpf_event_mutex);
1707 }
1708
1709 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1710 {
1711         struct perf_event_query_bpf __user *uquery = info;
1712         struct perf_event_query_bpf query = {};
1713         struct bpf_prog_array *progs;
1714         u32 *ids, prog_cnt, ids_len;
1715         int ret;
1716
1717         if (!perfmon_capable())
1718                 return -EPERM;
1719         if (event->attr.type != PERF_TYPE_TRACEPOINT)
1720                 return -EINVAL;
1721         if (copy_from_user(&query, uquery, sizeof(query)))
1722                 return -EFAULT;
1723
1724         ids_len = query.ids_len;
1725         if (ids_len > BPF_TRACE_MAX_PROGS)
1726                 return -E2BIG;
1727         ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1728         if (!ids)
1729                 return -ENOMEM;
1730         /*
1731          * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1732          * is required when the user only wants to check uquery->prog_cnt.
1733          * There is no need to check for it since the case is handled
1734          * gracefully in bpf_prog_array_copy_info.
1735          */
1736
1737         mutex_lock(&bpf_event_mutex);
1738         progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1739         ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1740         mutex_unlock(&bpf_event_mutex);
1741
1742         if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1743             copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1744                 ret = -EFAULT;
1745
1746         kfree(ids);
1747         return ret;
1748 }
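
/*
 * The userspace counterpart is ioctl(PERF_EVENT_IOC_QUERY_BPF) with a
 * variable-length struct perf_event_query_bpf.  Sketch, continuing the
 * attach example above (add <stdio.h>/<stdlib.h>):
 *
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + 64 * sizeof(__u32));
 *	q->ids_len = 64;	// kernel caps this at BPF_TRACE_MAX_PROGS
 *	if (!ioctl(efd, PERF_EVENT_IOC_QUERY_BPF, q))
 *		printf("%u program(s) attached\n", q->prog_cnt);
 */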
1749
1750 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1751 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1752
1753 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1754 {
1755         struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1756
1757         for (; btp < __stop__bpf_raw_tp; btp++) {
1758                 if (!strcmp(btp->tp->name, name))
1759                         return btp;
1760         }
1761
1762         return bpf_get_raw_tracepoint_module(name);
1763 }
1764
1765 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1766 {
1767         struct module *mod;
1768
1769         preempt_disable();
1770         mod = __module_address((unsigned long)btp);
1771         module_put(mod);
1772         preempt_enable();
1773 }
1774
1775 static __always_inline
1776 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1777 {
1778         cant_sleep();
1779         rcu_read_lock();
1780         (void) BPF_PROG_RUN(prog, args);
1781         rcu_read_unlock();
1782 }
1783
1784 #define UNPACK(...)                     __VA_ARGS__
1785 #define REPEAT_1(FN, DL, X, ...)        FN(X)
1786 #define REPEAT_2(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1787 #define REPEAT_3(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1788 #define REPEAT_4(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1789 #define REPEAT_5(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1790 #define REPEAT_6(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1791 #define REPEAT_7(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1792 #define REPEAT_8(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1793 #define REPEAT_9(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1794 #define REPEAT_10(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1795 #define REPEAT_11(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1796 #define REPEAT_12(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1797 #define REPEAT(X, FN, DL, ...)          REPEAT_##X(FN, DL, __VA_ARGS__)
1798
1799 #define SARG(X)         u64 arg##X
1800 #define COPY(X)         args[X] = arg##X
1801
1802 #define __DL_COM        (,)
1803 #define __DL_SEM        (;)
1804
1805 #define __SEQ_0_11      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1806
1807 #define BPF_TRACE_DEFN_x(x)                                             \
1808         void bpf_trace_run##x(struct bpf_prog *prog,                    \
1809                               REPEAT(x, SARG, __DL_COM, __SEQ_0_11))    \
1810         {                                                               \
1811                 u64 args[x];                                            \
1812                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
1813                 __bpf_trace_run(prog, args);                            \
1814         }                                                               \
1815         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1816 BPF_TRACE_DEFN_x(1);
1817 BPF_TRACE_DEFN_x(2);
1818 BPF_TRACE_DEFN_x(3);
1819 BPF_TRACE_DEFN_x(4);
1820 BPF_TRACE_DEFN_x(5);
1821 BPF_TRACE_DEFN_x(6);
1822 BPF_TRACE_DEFN_x(7);
1823 BPF_TRACE_DEFN_x(8);
1824 BPF_TRACE_DEFN_x(9);
1825 BPF_TRACE_DEFN_x(10);
1826 BPF_TRACE_DEFN_x(11);
1827 BPF_TRACE_DEFN_x(12);
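
/*
 * For reference, BPF_TRACE_DEFN_x(2) expands via REPEAT()/SARG()/COPY()
 * to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */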
1828
1829 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1830 {
1831         struct tracepoint *tp = btp->tp;
1832
1833         /*
1834          * Check that the program doesn't access arguments beyond what's
1835          * available in this tracepoint.
1836          */
1837         if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1838                 return -EINVAL;
1839
1840         if (prog->aux->max_tp_access > btp->writable_size)
1841                 return -EINVAL;
1842
1843         return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1844 }
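
/*
 * Concretely, for a tracepoint with num_args == 2 the readable context
 * spans 2 * sizeof(u64) == 16 bytes:
 *
 *	reading ctx->args[1] -> max_ctx_offset == 16, accepted
 *	reading ctx->args[2] -> max_ctx_offset == 24, -EINVAL
 *
 * and a writable-tracepoint program storing past writable_size fails
 * the second check.
 */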
1845
1846 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1847 {
1848         return __bpf_probe_register(btp, prog);
1849 }
1850
1851 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1852 {
1853         return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1854 }
1855
1856 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1857                             u32 *fd_type, const char **buf,
1858                             u64 *probe_offset, u64 *probe_addr)
1859 {
1860         bool is_tracepoint, is_syscall_tp;
1861         struct bpf_prog *prog;
1862         int flags, err = 0;
1863
1864         prog = event->prog;
1865         if (!prog)
1866                 return -ENOENT;
1867
1868         /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1869         if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1870                 return -EOPNOTSUPP;
1871
1872         *prog_id = prog->aux->id;
1873         flags = event->tp_event->flags;
1874         is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1875         is_syscall_tp = is_syscall_trace_event(event->tp_event);
1876
1877         if (is_tracepoint || is_syscall_tp) {
1878                 *buf = is_tracepoint ? event->tp_event->tp->name
1879                                      : event->tp_event->name;
1880                 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1881                 *probe_offset = 0x0;
1882                 *probe_addr = 0x0;
1883         } else {
1884                 /* kprobe/uprobe */
1885                 err = -EOPNOTSUPP;
1886 #ifdef CONFIG_KPROBE_EVENTS
1887                 if (flags & TRACE_EVENT_FL_KPROBE)
1888                         err = bpf_get_kprobe_info(event, fd_type, buf,
1889                                                   probe_offset, probe_addr,
1890                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
1891 #endif
1892 #ifdef CONFIG_UPROBE_EVENTS
1893                 if (flags & TRACE_EVENT_FL_UPROBE)
1894                         err = bpf_get_uprobe_info(event, fd_type, buf,
1895                                                   probe_offset,
1896                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
1897 #endif
1898         }
1899
1900         return err;
1901 }
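
/*
 * This backs the bpf(BPF_TASK_FD_QUERY) command.  Userspace sketch
 * (pid and efd are placeholders; error handling elided):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	char buf[256];
 *	union bpf_attr attr = {};
 *
 *	attr.task_fd_query.pid = pid;
 *	attr.task_fd_query.fd = efd;
 *	attr.task_fd_query.buf_len = sizeof(buf);
 *	attr.task_fd_query.buf = (__u64)(unsigned long)buf;
 *	if (!syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr)))
 *		printf("prog %u on %s\n", attr.task_fd_query.prog_id, buf);
 */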
1902
1903 static int __init send_signal_irq_work_init(void)
1904 {
1905         int cpu;
1906         struct send_signal_irq_work *work;
1907
1908         for_each_possible_cpu(cpu) {
1909                 work = per_cpu_ptr(&send_signal_work, cpu);
1910                 init_irq_work(&work->irq_work, do_bpf_send_signal);
1911         }
1912         return 0;
1913 }
1914
1915 subsys_initcall(send_signal_irq_work_init);
1916
1917 #ifdef CONFIG_MODULES
1918 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1919                             void *module)
1920 {
1921         struct bpf_trace_module *btm, *tmp;
1922         struct module *mod = module;
1923         int ret = 0;
1924
1925         if (mod->num_bpf_raw_events == 0 ||
1926             (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1927                 goto out;
1928
1929         mutex_lock(&bpf_module_mutex);
1930
1931         switch (op) {
1932         case MODULE_STATE_COMING:
1933                 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1934                 if (btm) {
1935                         btm->module = module;
1936                         list_add(&btm->list, &bpf_trace_modules);
1937                 } else {
1938                         ret = -ENOMEM;
1939                 }
1940                 break;
1941         case MODULE_STATE_GOING:
1942                 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1943                         if (btm->module == module) {
1944                                 list_del(&btm->list);
1945                                 kfree(btm);
1946                                 break;
1947                         }
1948                 }
1949                 break;
1950         }
1951
1952         mutex_unlock(&bpf_module_mutex);
1953
1954 out:
1955         return notifier_from_errno(ret);
1956 }
1957
1958 static struct notifier_block bpf_module_nb = {
1959         .notifier_call = bpf_event_notify,
1960 };
1961
1962 static int __init bpf_event_init(void)
1963 {
1964         register_module_notifier(&bpf_module_nb);
1965         return 0;
1966 }
1967
1968 fs_initcall(bpf_event_init);
1969 #endif /* CONFIG_MODULES */