kernel/trace/bpf_trace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
19 #include <linux/bpf_lsm.h>
20
21 #include <net/bpf_sk_storage.h>
22
23 #include <uapi/linux/bpf.h>
24 #include <uapi/linux/btf.h>
25
26 #include <asm/tlb.h>
27
28 #include "trace_probe.h"
29 #include "trace.h"
30
31 #define CREATE_TRACE_POINTS
32 #include "bpf_trace.h"
33
34 #define bpf_event_rcu_dereference(p)                                    \
35         rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
36
37 #ifdef CONFIG_MODULES
38 struct bpf_trace_module {
39         struct module *module;
40         struct list_head list;
41 };
42
43 static LIST_HEAD(bpf_trace_modules);
44 static DEFINE_MUTEX(bpf_module_mutex);
45
46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
47 {
48         struct bpf_raw_event_map *btp, *ret = NULL;
49         struct bpf_trace_module *btm;
50         unsigned int i;
51
52         mutex_lock(&bpf_module_mutex);
53         list_for_each_entry(btm, &bpf_trace_modules, list) {
54                 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
55                         btp = &btm->module->bpf_raw_events[i];
56                         if (!strcmp(btp->tp->name, name)) {
57                                 if (try_module_get(btm->module))
58                                         ret = btp;
59                                 goto out;
60                         }
61                 }
62         }
63 out:
64         mutex_unlock(&bpf_module_mutex);
65         return ret;
66 }
67 #else
68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
69 {
70         return NULL;
71 }
72 #endif /* CONFIG_MODULES */
73
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
76
77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
78                                   u64 flags, const struct btf **btf,
79                                   s32 *btf_id);
80
81 /**
82  * trace_call_bpf - invoke BPF program
83  * @call: tracepoint event
84  * @ctx: opaque context pointer
85  *
86  * kprobe handlers execute BPF programs via this helper.
87  * Can be used from static tracepoints in the future.
88  *
89  * Return: BPF programs always return an integer, which the kprobe
90  * handler interprets as follows:
91  * 0 - return from kprobe (event is filtered out)
92  * 1 - store kprobe event into ring buffer
93  * Other values are reserved and currently alias to 1
94  */
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
96 {
97         unsigned int ret;
98
99         cant_sleep();
100
101         if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
102                 /*
103                  * Some BPF program is already running on this CPU, so
104                  * don't call into another BPF program (same or different)
105                  * and don't send a kprobe event into the ring buffer;
106                  * simply return zero here.
107                  */
108                 ret = 0;
109                 goto out;
110         }
111
112         /*
113          * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114          * to all call sites, the call sites do a cheap
115          * bpf_prog_array_valid() check to see whether call->prog_array
116          * is empty; this is a heuristic to speed up execution.
117          *
118          * If the prog_array fetched by bpf_prog_array_valid() was
119          * non-NULL, we enter trace_call_bpf() and do the proper
120          * rcu_dereference() under the RCU read lock. If that
121          * dereference then yields a NULL prog_array, we simply bail out.
122          * Conversely, if the pointer fetched by bpf_prog_array_valid()
123          * was NULL, the call site skips trace_call_bpf() and may miss
124          * events if prog_array was updated between that check and the
125          * rcu_dereference(); this is an accepted risk.
126          */
127         ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
128
129  out:
130         __this_cpu_dec(bpf_prog_active);
131
132         return ret;
133 }
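
/*
 * Illustrative sketch, not part of this file: a minimal kprobe BPF program
 * (built separately with clang -target bpf against vmlinux.h and libbpf
 * headers) showing how the 0/1 return convention documented above is
 * typically used. The attach point and the PID value are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int filter_openat(struct pt_regs *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (tgid != 1234)
 *			return 0;	// filter out: no event is stored
 *		return 1;		// store the kprobe event in the ring buffer
 *	}
 *	char LICENSE[] SEC("license") = "GPL";
 */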
134
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
137 {
138         regs_set_return_value(regs, rc);
139         override_function_with_return(regs);
140         return 0;
141 }
142
143 static const struct bpf_func_proto bpf_override_return_proto = {
144         .func           = bpf_override_return,
145         .gpl_only       = true,
146         .ret_type       = RET_INTEGER,
147         .arg1_type      = ARG_PTR_TO_CTX,
148         .arg2_type      = ARG_ANYTHING,
149 };
150 #endif
151
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
154 {
155         int ret;
156
157         ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158         if (unlikely(ret < 0))
159                 memset(dst, 0, size);
160         return ret;
161 }
162
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164            const void __user *, unsafe_ptr)
165 {
166         return bpf_probe_read_user_common(dst, size, unsafe_ptr);
167 }
168
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170         .func           = bpf_probe_read_user,
171         .gpl_only       = true,
172         .ret_type       = RET_INTEGER,
173         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
174         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
175         .arg3_type      = ARG_ANYTHING,
176 };
177
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180                                const void __user *unsafe_ptr)
181 {
182         int ret;
183
184         /*
185          * NB: We rely on strncpy_from_user() not copying junk past the NUL
186          * terminator into `dst`.
187          *
188          * strncpy_from_user() copies in long-sized strides in the fast path.
189          * If it did not mask out the bytes after the NUL in `unsafe_ptr`,
190          * there could be junk after the NUL in `dst`. If a user then keyed
191          * a hash map with `dst`, semantically identical strings could
192          * occupy multiple entries in the map.
193          */
194         ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
195         if (unlikely(ret < 0))
196                 memset(dst, 0, size);
197         return ret;
198 }
199
200 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
201            const void __user *, unsafe_ptr)
202 {
203         return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
204 }
205
206 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
207         .func           = bpf_probe_read_user_str,
208         .gpl_only       = true,
209         .ret_type       = RET_INTEGER,
210         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
211         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
212         .arg3_type      = ARG_ANYTHING,
213 };
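
/*
 * Illustrative sketch, not part of this file: reading a NUL-terminated
 * string from user memory with bpf_probe_read_user_str(). Assumes a kprobe
 * program built with libbpf (bpf_tracing.h provides PT_REGS_PARM2 and
 * bpf_helpers.h provides bpf_printk); the attach point is hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		char fname[256];
 *		long len;
 *
 *		// On success, len includes the trailing NUL; on failure the
 *		// destination buffer has been zeroed by the helper.
 *		len = bpf_probe_read_user_str(fname, sizeof(fname), uptr);
 *		if (len > 0)
 *			bpf_printk("openat: %s", fname);
 *		return 0;
 *	}
 */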
214
215 static __always_inline int
216 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
217 {
218         int ret = security_locked_down(LOCKDOWN_BPF_READ);
219
220         if (unlikely(ret < 0))
221                 goto fail;
222         ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
223         if (unlikely(ret < 0))
224                 goto fail;
225         return ret;
226 fail:
227         memset(dst, 0, size);
228         return ret;
229 }
230
231 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
232            const void *, unsafe_ptr)
233 {
234         return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
235 }
236
237 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
238         .func           = bpf_probe_read_kernel,
239         .gpl_only       = true,
240         .ret_type       = RET_INTEGER,
241         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
242         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
243         .arg3_type      = ARG_ANYTHING,
244 };
245
246 static __always_inline int
247 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
248 {
249         int ret = security_locked_down(LOCKDOWN_BPF_READ);
250
251         if (unlikely(ret < 0))
252                 goto fail;
253
254         /*
255          * The strncpy_from_kernel_nofault() call will likely not fill the
256          * entire buffer, which is fine here: as with bpf_probe_read_*(),
257          * we are probing arbitrary memory anyway and might as well be
258          * probing the stack. The buffer is therefore cleared explicitly
259          * only in the error case, so that careless users who ignore the
260          * return code do not copy garbage; on success the string length
261          * is returned and can be fed to bpf_perf_event_output() et al.
262          */
263         ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
264         if (unlikely(ret < 0))
265                 goto fail;
266
267         return ret;
268 fail:
269         memset(dst, 0, size);
270         return ret;
271 }
272
273 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
274            const void *, unsafe_ptr)
275 {
276         return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
277 }
278
279 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
280         .func           = bpf_probe_read_kernel_str,
281         .gpl_only       = true,
282         .ret_type       = RET_INTEGER,
283         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
284         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
285         .arg3_type      = ARG_ANYTHING,
286 };
287
288 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
289 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
290            const void *, unsafe_ptr)
291 {
292         if ((unsigned long)unsafe_ptr < TASK_SIZE) {
293                 return bpf_probe_read_user_common(dst, size,
294                                 (__force void __user *)unsafe_ptr);
295         }
296         return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
297 }
298
299 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
300         .func           = bpf_probe_read_compat,
301         .gpl_only       = true,
302         .ret_type       = RET_INTEGER,
303         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
304         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
305         .arg3_type      = ARG_ANYTHING,
306 };
307
308 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
309            const void *, unsafe_ptr)
310 {
311         if ((unsigned long)unsafe_ptr < TASK_SIZE) {
312                 return bpf_probe_read_user_str_common(dst, size,
313                                 (__force void __user *)unsafe_ptr);
314         }
315         return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
316 }
317
318 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
319         .func           = bpf_probe_read_compat_str,
320         .gpl_only       = true,
321         .ret_type       = RET_INTEGER,
322         .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
323         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
324         .arg3_type      = ARG_ANYTHING,
325 };
326 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
327
328 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
329            u32, size)
330 {
331         /*
332          * Ensure we are in a user context in which it is safe for the
333          * helper to run; this helper has no business in a kthread.
334          *
335          * access_ok() should prevent writing to non-user memory, but in
336          * some situations (nommu, temporary switch, etc.) access_ok() does
337          * not provide enough validation, hence the check on KERNEL_DS.
338          *
339          * nmi_uaccess_okay() ensures the probe is not run in an interim
340          * state while the task or mm is being switched. This is
341          * specifically required to prevent the use of a temporary mm.
342          */
343
344         if (unlikely(in_interrupt() ||
345                      current->flags & (PF_KTHREAD | PF_EXITING)))
346                 return -EPERM;
347         if (unlikely(uaccess_kernel()))
348                 return -EPERM;
349         if (unlikely(!nmi_uaccess_okay()))
350                 return -EPERM;
351
352         return copy_to_user_nofault(unsafe_ptr, src, size);
353 }
354
355 static const struct bpf_func_proto bpf_probe_write_user_proto = {
356         .func           = bpf_probe_write_user,
357         .gpl_only       = true,
358         .ret_type       = RET_INTEGER,
359         .arg1_type      = ARG_ANYTHING,
360         .arg2_type      = ARG_PTR_TO_MEM,
361         .arg3_type      = ARG_CONST_SIZE,
362 };
363
364 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
365 {
366         if (!capable(CAP_SYS_ADMIN))
367                 return NULL;
368
369         pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
370                             current->comm, task_pid_nr(current));
371
372         return &bpf_probe_write_user_proto;
373 }
374
375 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
376
377 #define MAX_TRACE_PRINTK_VARARGS        3
378 #define BPF_TRACE_PRINTK_SIZE           1024
379
380 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
381            u64, arg2, u64, arg3)
382 {
383         u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
384         u32 *bin_args;
385         static char buf[BPF_TRACE_PRINTK_SIZE];
386         unsigned long flags;
387         int ret;
388
389         ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
390                                   MAX_TRACE_PRINTK_VARARGS);
391         if (ret < 0)
392                 return ret;
393
394         raw_spin_lock_irqsave(&trace_printk_lock, flags);
395         ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);
396
397         trace_bpf_trace_printk(buf);
398         raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
399
400         bpf_bprintf_cleanup();
401
402         return ret;
403 }
404
405 static const struct bpf_func_proto bpf_trace_printk_proto = {
406         .func           = bpf_trace_printk,
407         .gpl_only       = true,
408         .ret_type       = RET_INTEGER,
409         .arg1_type      = ARG_PTR_TO_MEM,
410         .arg2_type      = ARG_CONST_SIZE,
411 };
412
413 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
414 {
415         /*
416          * This program might be calling bpf_trace_printk,
417          * so enable the associated bpf_trace/bpf_trace_printk event.
418          * Repeat this each time, since a user may have disabled
419          * bpf_trace_printk events in the meantime. By loading a
420          * program that calls bpf_trace_printk(), however, the user
421          * has expressed the intent to see such events.
422          */
423         if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
424                 pr_warn_ratelimited("could not enable bpf_trace_printk events");
425
426         return &bpf_trace_printk_proto;
427 }
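
/*
 * Illustrative sketch, not part of this file: typical bpf_trace_printk()
 * usage via libbpf's bpf_printk() convenience macro, which passes the
 * format string, its size, and up to three arguments exactly as the helper
 * above expects. Output lands in the bpf_trace/bpf_trace_printk event and
 * can be read from tracefs. The attach point is hypothetical.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_exec(void *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		bpf_printk("execve by tgid %d", tgid);
 *		return 0;
 *	}
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 */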
428
429 #define MAX_SEQ_PRINTF_VARARGS          12
430
431 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
432            const void *, data, u32, data_len)
433 {
434         int err, num_args;
435         u32 *bin_args;
436
437         if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
438             (data_len && !data))
439                 return -EINVAL;
440         num_args = data_len / 8;
441
442         err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
443         if (err < 0)
444                 return err;
445
446         seq_bprintf(m, fmt, bin_args);
447
448         bpf_bprintf_cleanup();
449
450         return seq_has_overflowed(m) ? -EOVERFLOW : 0;
451 }
452
453 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
454
455 static const struct bpf_func_proto bpf_seq_printf_proto = {
456         .func           = bpf_seq_printf,
457         .gpl_only       = true,
458         .ret_type       = RET_INTEGER,
459         .arg1_type      = ARG_PTR_TO_BTF_ID,
460         .arg1_btf_id    = &btf_seq_file_ids[0],
461         .arg2_type      = ARG_PTR_TO_MEM,
462         .arg3_type      = ARG_CONST_SIZE,
463         .arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
464         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
465 };
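
/*
 * Illustrative sketch, not part of this file: bpf_seq_printf() is intended
 * for BPF_TRACE_ITER programs, which receive a seq_file through their
 * context. A minimal task iterator (assuming a vmlinux.h that provides the
 * bpf_iter__task context type) might look like:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "%d %s\n";
 *		u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->pid;
 *		args[1] = (u64)task->comm;
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */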
466
467 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
468 {
469         return seq_write(m, data, len) ? -EOVERFLOW : 0;
470 }
471
472 static const struct bpf_func_proto bpf_seq_write_proto = {
473         .func           = bpf_seq_write,
474         .gpl_only       = true,
475         .ret_type       = RET_INTEGER,
476         .arg1_type      = ARG_PTR_TO_BTF_ID,
477         .arg1_btf_id    = &btf_seq_file_ids[0],
478         .arg2_type      = ARG_PTR_TO_MEM,
479         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
480 };
481
482 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
483            u32, btf_ptr_size, u64, flags)
484 {
485         const struct btf *btf;
486         s32 btf_id;
487         int ret;
488
489         ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
490         if (ret)
491                 return ret;
492
493         return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
494 }
495
496 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
497         .func           = bpf_seq_printf_btf,
498         .gpl_only       = true,
499         .ret_type       = RET_INTEGER,
500         .arg1_type      = ARG_PTR_TO_BTF_ID,
501         .arg1_btf_id    = &btf_seq_file_ids[0],
502         .arg2_type      = ARG_PTR_TO_MEM,
503         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
504         .arg4_type      = ARG_ANYTHING,
505 };
506
507 static __always_inline int
508 get_map_perf_counter(struct bpf_map *map, u64 flags,
509                      u64 *value, u64 *enabled, u64 *running)
510 {
511         struct bpf_array *array = container_of(map, struct bpf_array, map);
512         unsigned int cpu = smp_processor_id();
513         u64 index = flags & BPF_F_INDEX_MASK;
514         struct bpf_event_entry *ee;
515
516         if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
517                 return -EINVAL;
518         if (index == BPF_F_CURRENT_CPU)
519                 index = cpu;
520         if (unlikely(index >= array->map.max_entries))
521                 return -E2BIG;
522
523         ee = READ_ONCE(array->ptrs[index]);
524         if (!ee)
525                 return -ENOENT;
526
527         return perf_event_read_local(ee->event, value, enabled, running);
528 }
529
530 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
531 {
532         u64 value = 0;
533         int err;
534
535         err = get_map_perf_counter(map, flags, &value, NULL, NULL);
536         /*
537          * This API is ugly since it cannot represent the [-22..-2] range
538          * of valid counter values, but that is established uapi.
539          */
540         if (err)
541                 return err;
542         return value;
543 }
544
545 static const struct bpf_func_proto bpf_perf_event_read_proto = {
546         .func           = bpf_perf_event_read,
547         .gpl_only       = true,
548         .ret_type       = RET_INTEGER,
549         .arg1_type      = ARG_CONST_MAP_PTR,
550         .arg2_type      = ARG_ANYTHING,
551 };
552
553 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
554            struct bpf_perf_event_value *, buf, u32, size)
555 {
556         int err = -EINVAL;
557
558         if (unlikely(size != sizeof(struct bpf_perf_event_value)))
559                 goto clear;
560         err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
561                                    &buf->running);
562         if (unlikely(err))
563                 goto clear;
564         return 0;
565 clear:
566         memset(buf, 0, size);
567         return err;
568 }
569
570 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
571         .func           = bpf_perf_event_read_value,
572         .gpl_only       = true,
573         .ret_type       = RET_INTEGER,
574         .arg1_type      = ARG_CONST_MAP_PTR,
575         .arg2_type      = ARG_ANYTHING,
576         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
577         .arg4_type      = ARG_CONST_SIZE,
578 };
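
/*
 * Illustrative sketch, not part of this file: reading a hardware counter
 * opened by user space and stored in a BPF_MAP_TYPE_PERF_EVENT_ARRAY. The
 * map name and attach point are hypothetical; user space is expected to
 * have placed a perf event FD at the index being read (here, current CPU).
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(max_entries, 64);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} counters SEC(".maps");
 *
 *	SEC("kprobe/some_hot_function")
 *	int read_counter(struct pt_regs *ctx)
 *	{
 *		struct bpf_perf_event_value v = {};
 *		int err;
 *
 *		err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *						&v, sizeof(v));
 *		if (!err)
 *			bpf_printk("counter %llu enabled %llu running %llu",
 *				   v.counter, v.enabled, v.running);
 *		return 0;
 *	}
 */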
579
580 static __always_inline u64
581 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
582                         u64 flags, struct perf_sample_data *sd)
583 {
584         struct bpf_array *array = container_of(map, struct bpf_array, map);
585         unsigned int cpu = smp_processor_id();
586         u64 index = flags & BPF_F_INDEX_MASK;
587         struct bpf_event_entry *ee;
588         struct perf_event *event;
589
590         if (index == BPF_F_CURRENT_CPU)
591                 index = cpu;
592         if (unlikely(index >= array->map.max_entries))
593                 return -E2BIG;
594
595         ee = READ_ONCE(array->ptrs[index]);
596         if (!ee)
597                 return -ENOENT;
598
599         event = ee->event;
600         if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
601                      event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
602                 return -EINVAL;
603
604         if (unlikely(event->oncpu != cpu))
605                 return -EOPNOTSUPP;
606
607         return perf_event_output(event, sd, regs);
608 }
609
610 /*
611  * Support tracepoints executing in normal, irq, and nmi context, each of
612  * which may call bpf_perf_event_output(); hence three nested sample slots.
613  */
614 struct bpf_trace_sample_data {
615         struct perf_sample_data sds[3];
616 };
617
618 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
619 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
620 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
621            u64, flags, void *, data, u64, size)
622 {
623         struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
624         int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
625         struct perf_raw_record raw = {
626                 .frag = {
627                         .size = size,
628                         .data = data,
629                 },
630         };
631         struct perf_sample_data *sd;
632         int err;
633
634         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
635                 err = -EBUSY;
636                 goto out;
637         }
638
639         sd = &sds->sds[nest_level - 1];
640
641         if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
642                 err = -EINVAL;
643                 goto out;
644         }
645
646         perf_sample_data_init(sd, 0, 0);
647         sd->raw = &raw;
648
649         err = __bpf_perf_event_output(regs, map, flags, sd);
650
651 out:
652         this_cpu_dec(bpf_trace_nest_level);
653         return err;
654 }
655
656 static const struct bpf_func_proto bpf_perf_event_output_proto = {
657         .func           = bpf_perf_event_output,
658         .gpl_only       = true,
659         .ret_type       = RET_INTEGER,
660         .arg1_type      = ARG_PTR_TO_CTX,
661         .arg2_type      = ARG_CONST_MAP_PTR,
662         .arg3_type      = ARG_ANYTHING,
663         .arg4_type      = ARG_PTR_TO_MEM,
664         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
665 };
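
/*
 * Illustrative sketch, not part of this file: emitting an event to user
 * space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY with bpf_perf_event_output().
 * The map, struct, and attach point are hypothetical; user space would read
 * the events with perf_buffer__new()/perf_buffer__poll() from libbpf.
 *
 *	struct event {
 *		u32 pid;
 *		char comm[16];
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_exit")
 *	int on_exit(struct pt_regs *ctx)
 *	{
 *		struct event e = {};
 *
 *		e.pid = bpf_get_current_pid_tgid() >> 32;
 *		bpf_get_current_comm(e.comm, sizeof(e.comm));
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */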
666
667 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
668 struct bpf_nested_pt_regs {
669         struct pt_regs regs[3];
670 };
671 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
672 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
673
674 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
675                      void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
676 {
677         int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
678         struct perf_raw_frag frag = {
679                 .copy           = ctx_copy,
680                 .size           = ctx_size,
681                 .data           = ctx,
682         };
683         struct perf_raw_record raw = {
684                 .frag = {
685                         {
686                                 .next   = ctx_size ? &frag : NULL,
687                         },
688                         .size   = meta_size,
689                         .data   = meta,
690                 },
691         };
692         struct perf_sample_data *sd;
693         struct pt_regs *regs;
694         u64 ret;
695
696         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
697                 ret = -EBUSY;
698                 goto out;
699         }
700         sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
701         regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
702
703         perf_fetch_caller_regs(regs);
704         perf_sample_data_init(sd, 0, 0);
705         sd->raw = &raw;
706
707         ret = __bpf_perf_event_output(regs, map, flags, sd);
708 out:
709         this_cpu_dec(bpf_event_output_nest_level);
710         return ret;
711 }
712
713 BPF_CALL_0(bpf_get_current_task)
714 {
715         return (long) current;
716 }
717
718 const struct bpf_func_proto bpf_get_current_task_proto = {
719         .func           = bpf_get_current_task,
720         .gpl_only       = true,
721         .ret_type       = RET_INTEGER,
722 };
723
724 BPF_CALL_0(bpf_get_current_task_btf)
725 {
726         return (unsigned long) current;
727 }
728
729 BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
730
731 static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
732         .func           = bpf_get_current_task_btf,
733         .gpl_only       = true,
734         .ret_type       = RET_PTR_TO_BTF_ID,
735         .ret_btf_id     = &bpf_get_current_btf_ids[0],
736 };
737
738 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
739 {
740         struct bpf_array *array = container_of(map, struct bpf_array, map);
741         struct cgroup *cgrp;
742
743         if (unlikely(idx >= array->map.max_entries))
744                 return -E2BIG;
745
746         cgrp = READ_ONCE(array->ptrs[idx]);
747         if (unlikely(!cgrp))
748                 return -EAGAIN;
749
750         return task_under_cgroup_hierarchy(current, cgrp);
751 }
752
753 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
754         .func           = bpf_current_task_under_cgroup,
755         .gpl_only       = false,
756         .ret_type       = RET_INTEGER,
757         .arg1_type      = ARG_CONST_MAP_PTR,
758         .arg2_type      = ARG_ANYTHING,
759 };
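
/*
 * Illustrative sketch, not part of this file: checking whether the current
 * task runs under a cgroup stored in a BPF_MAP_TYPE_CGROUP_ARRAY. User
 * space is assumed to have placed the cgroup FD at index 0 of the map; the
 * map name and attach point are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
 *		__uint(max_entries, 1);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, sizeof(u32));
 *	} cgrp_map SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int only_in_cgroup(struct pt_regs *ctx)
 *	{
 *		// 1: task is in the cgroup hierarchy, 0: it is not,
 *		// negative: error (e.g. empty slot).
 *		if (bpf_current_task_under_cgroup(&cgrp_map, 0) != 1)
 *			return 0;
 *		// ... tracing logic limited to that cgroup ...
 *		return 0;
 *	}
 */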
760
761 struct send_signal_irq_work {
762         struct irq_work irq_work;
763         struct task_struct *task;
764         u32 sig;
765         enum pid_type type;
766 };
767
768 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
769
770 static void do_bpf_send_signal(struct irq_work *entry)
771 {
772         struct send_signal_irq_work *work;
773
774         work = container_of(entry, struct send_signal_irq_work, irq_work);
775         group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
776 }
777
778 static int bpf_send_signal_common(u32 sig, enum pid_type type)
779 {
780         struct send_signal_irq_work *work = NULL;
781
782         /* Similar to bpf_probe_write_user(), the task needs to be
783          * in a sound state and user memory access must be safe
784          * (no kthread, no KERNEL_DS, NMI uaccess okay) before a
785          * signal can be sent to the current task.
786          */
787         if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
788                 return -EPERM;
789         if (unlikely(uaccess_kernel()))
790                 return -EPERM;
791         if (unlikely(!nmi_uaccess_okay()))
792                 return -EPERM;
793
794         if (irqs_disabled()) {
795                 /* Do an early check on signal validity. Otherwise,
796                  * the error is lost in deferred irq_work.
797                  */
798                 if (unlikely(!valid_signal(sig)))
799                         return -EINVAL;
800
801                 work = this_cpu_ptr(&send_signal_work);
802                 if (irq_work_is_busy(&work->irq_work))
803                         return -EBUSY;
804
805                 /* Record the current task, which is the target of the
806                  * signal, in the irq_work: the notion of "current" may
807                  * have changed by the time the queued irq_work runs.
808                  */
809                 work->task = current;
810                 work->sig = sig;
811                 work->type = type;
812                 irq_work_queue(&work->irq_work);
813                 return 0;
814         }
815
816         return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
817 }
818
819 BPF_CALL_1(bpf_send_signal, u32, sig)
820 {
821         return bpf_send_signal_common(sig, PIDTYPE_TGID);
822 }
823
824 static const struct bpf_func_proto bpf_send_signal_proto = {
825         .func           = bpf_send_signal,
826         .gpl_only       = false,
827         .ret_type       = RET_INTEGER,
828         .arg1_type      = ARG_ANYTHING,
829 };
830
831 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
832 {
833         return bpf_send_signal_common(sig, PIDTYPE_PID);
834 }
835
836 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
837         .func           = bpf_send_signal_thread,
838         .gpl_only       = false,
839         .ret_type       = RET_INTEGER,
840         .arg1_type      = ARG_ANYTHING,
841 };
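
/*
 * Illustrative sketch, not part of this file: delivering a signal to the
 * task that triggered the probe. bpf_send_signal() targets the whole
 * thread group (PIDTYPE_TGID) while bpf_send_signal_thread() targets only
 * the current thread (PIDTYPE_PID). The attach point and policy are
 * hypothetical.
 *
 *	SEC("kprobe/security_bprm_check")
 *	int kill_on_exec(struct pt_regs *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (tgid == 4242)		// hypothetical policy
 *			bpf_send_signal(9);	// SIGKILL the thread group
 *		return 0;
 *	}
 */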
842
843 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
844 {
845         long len;
846         char *p;
847
848         if (!sz)
849                 return 0;
850
851         p = d_path(path, buf, sz);
852         if (IS_ERR(p)) {
853                 len = PTR_ERR(p);
854         } else {
855                 len = buf + sz - p;
856                 memmove(buf, p, len);
857         }
858
859         return len;
860 }
861
862 BTF_SET_START(btf_allowlist_d_path)
863 #ifdef CONFIG_SECURITY
864 BTF_ID(func, security_file_permission)
865 BTF_ID(func, security_inode_getattr)
866 BTF_ID(func, security_file_open)
867 #endif
868 #ifdef CONFIG_SECURITY_PATH
869 BTF_ID(func, security_path_truncate)
870 #endif
871 BTF_ID(func, vfs_truncate)
872 BTF_ID(func, vfs_fallocate)
873 BTF_ID(func, dentry_open)
874 BTF_ID(func, vfs_getattr)
875 BTF_ID(func, filp_close)
876 BTF_SET_END(btf_allowlist_d_path)
877
878 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
879 {
880         if (prog->type == BPF_PROG_TYPE_TRACING &&
881             prog->expected_attach_type == BPF_TRACE_ITER)
882                 return true;
883
884         if (prog->type == BPF_PROG_TYPE_LSM)
885                 return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
886
887         return btf_id_set_contains(&btf_allowlist_d_path,
888                                    prog->aux->attach_btf_id);
889 }
890
891 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
892
893 static const struct bpf_func_proto bpf_d_path_proto = {
894         .func           = bpf_d_path,
895         .gpl_only       = false,
896         .ret_type       = RET_INTEGER,
897         .arg1_type      = ARG_PTR_TO_BTF_ID,
898         .arg1_btf_id    = &bpf_d_path_btf_ids[0],
899         .arg2_type      = ARG_PTR_TO_MEM,
900         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
901         .allowed        = bpf_d_path_allowed,
902 };
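
/*
 * Illustrative sketch, not part of this file: bpf_d_path() may only be
 * called from the allowlisted hooks above (or from BPF_TRACE_ITER and
 * sleepable LSM programs). A minimal fentry program on one of those hooks
 * could resolve the path of the file being truncated:
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(trace_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *		long len;
 *
 *		len = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *		if (len > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */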
903
904 #define BTF_F_ALL       (BTF_F_COMPACT  | BTF_F_NONAME | \
905                          BTF_F_PTR_RAW | BTF_F_ZERO)
906
907 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
908                                   u64 flags, const struct btf **btf,
909                                   s32 *btf_id)
910 {
911         const struct btf_type *t;
912
913         if (unlikely(flags & ~(BTF_F_ALL)))
914                 return -EINVAL;
915
916         if (btf_ptr_size != sizeof(struct btf_ptr))
917                 return -EINVAL;
918
919         *btf = bpf_get_btf_vmlinux();
920
921         if (IS_ERR_OR_NULL(*btf))
922                 return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
923
924         if (ptr->type_id > 0)
925                 *btf_id = ptr->type_id;
926         else
927                 return -EINVAL;
928
929         if (*btf_id > 0)
930                 t = btf_type_by_id(*btf, *btf_id);
931         if (*btf_id <= 0 || !t)
932                 return -ENOENT;
933
934         return 0;
935 }
936
937 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
938            u32, btf_ptr_size, u64, flags)
939 {
940         const struct btf *btf;
941         s32 btf_id;
942         int ret;
943
944         ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
945         if (ret)
946                 return ret;
947
948         return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
949                                       flags);
950 }
951
952 const struct bpf_func_proto bpf_snprintf_btf_proto = {
953         .func           = bpf_snprintf_btf,
954         .gpl_only       = false,
955         .ret_type       = RET_INTEGER,
956         .arg1_type      = ARG_PTR_TO_MEM,
957         .arg2_type      = ARG_CONST_SIZE,
958         .arg3_type      = ARG_PTR_TO_MEM,
959         .arg4_type      = ARG_CONST_SIZE,
960         .arg5_type      = ARG_ANYTHING,
961 };
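
/*
 * Illustrative sketch, not part of this file: pretty-printing a kernel
 * object via its BTF type with bpf_snprintf_btf(). Assumes libbpf's
 * bpf_core_type_id_kernel() to obtain the vmlinux BTF type id; the attach
 * point is hypothetical.
 *
 *	SEC("tp_btf/sched_switch")
 *	int BPF_PROG(show_task, bool preempt, struct task_struct *prev,
 *		     struct task_struct *next)
 *	{
 *		static char out[512];
 *		struct btf_ptr ptr = {
 *			.ptr = next,
 *			.type_id = bpf_core_type_id_kernel(struct task_struct),
 *		};
 *
 *		bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
 *				 BTF_F_COMPACT);
 *		return 0;
 *	}
 */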
962
963 const struct bpf_func_proto *
964 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
965 {
966         switch (func_id) {
967         case BPF_FUNC_map_lookup_elem:
968                 return &bpf_map_lookup_elem_proto;
969         case BPF_FUNC_map_update_elem:
970                 return &bpf_map_update_elem_proto;
971         case BPF_FUNC_map_delete_elem:
972                 return &bpf_map_delete_elem_proto;
973         case BPF_FUNC_map_push_elem:
974                 return &bpf_map_push_elem_proto;
975         case BPF_FUNC_map_pop_elem:
976                 return &bpf_map_pop_elem_proto;
977         case BPF_FUNC_map_peek_elem:
978                 return &bpf_map_peek_elem_proto;
979         case BPF_FUNC_ktime_get_ns:
980                 return &bpf_ktime_get_ns_proto;
981         case BPF_FUNC_ktime_get_boot_ns:
982                 return &bpf_ktime_get_boot_ns_proto;
983         case BPF_FUNC_ktime_get_coarse_ns:
984                 return &bpf_ktime_get_coarse_ns_proto;
985         case BPF_FUNC_tail_call:
986                 return &bpf_tail_call_proto;
987         case BPF_FUNC_get_current_pid_tgid:
988                 return &bpf_get_current_pid_tgid_proto;
989         case BPF_FUNC_get_current_task:
990                 return &bpf_get_current_task_proto;
991         case BPF_FUNC_get_current_task_btf:
992                 return &bpf_get_current_task_btf_proto;
993         case BPF_FUNC_get_current_uid_gid:
994                 return &bpf_get_current_uid_gid_proto;
995         case BPF_FUNC_get_current_comm:
996                 return &bpf_get_current_comm_proto;
997         case BPF_FUNC_trace_printk:
998                 return bpf_get_trace_printk_proto();
999         case BPF_FUNC_get_smp_processor_id:
1000                 return &bpf_get_smp_processor_id_proto;
1001         case BPF_FUNC_get_numa_node_id:
1002                 return &bpf_get_numa_node_id_proto;
1003         case BPF_FUNC_perf_event_read:
1004                 return &bpf_perf_event_read_proto;
1005         case BPF_FUNC_probe_write_user:
1006                 return bpf_get_probe_write_proto();
1007         case BPF_FUNC_current_task_under_cgroup:
1008                 return &bpf_current_task_under_cgroup_proto;
1009         case BPF_FUNC_get_prandom_u32:
1010                 return &bpf_get_prandom_u32_proto;
1011         case BPF_FUNC_probe_read_user:
1012                 return &bpf_probe_read_user_proto;
1013         case BPF_FUNC_probe_read_kernel:
1014                 return &bpf_probe_read_kernel_proto;
1015         case BPF_FUNC_probe_read_user_str:
1016                 return &bpf_probe_read_user_str_proto;
1017         case BPF_FUNC_probe_read_kernel_str:
1018                 return &bpf_probe_read_kernel_str_proto;
1019 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1020         case BPF_FUNC_probe_read:
1021                 return &bpf_probe_read_compat_proto;
1022         case BPF_FUNC_probe_read_str:
1023                 return &bpf_probe_read_compat_str_proto;
1024 #endif
1025 #ifdef CONFIG_CGROUPS
1026         case BPF_FUNC_get_current_cgroup_id:
1027                 return &bpf_get_current_cgroup_id_proto;
1028 #endif
1029         case BPF_FUNC_send_signal:
1030                 return &bpf_send_signal_proto;
1031         case BPF_FUNC_send_signal_thread:
1032                 return &bpf_send_signal_thread_proto;
1033         case BPF_FUNC_perf_event_read_value:
1034                 return &bpf_perf_event_read_value_proto;
1035         case BPF_FUNC_get_ns_current_pid_tgid:
1036                 return &bpf_get_ns_current_pid_tgid_proto;
1037         case BPF_FUNC_ringbuf_output:
1038                 return &bpf_ringbuf_output_proto;
1039         case BPF_FUNC_ringbuf_reserve:
1040                 return &bpf_ringbuf_reserve_proto;
1041         case BPF_FUNC_ringbuf_submit:
1042                 return &bpf_ringbuf_submit_proto;
1043         case BPF_FUNC_ringbuf_discard:
1044                 return &bpf_ringbuf_discard_proto;
1045         case BPF_FUNC_ringbuf_query:
1046                 return &bpf_ringbuf_query_proto;
1047         case BPF_FUNC_jiffies64:
1048                 return &bpf_jiffies64_proto;
1049         case BPF_FUNC_get_task_stack:
1050                 return &bpf_get_task_stack_proto;
1051         case BPF_FUNC_copy_from_user:
1052                 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1053         case BPF_FUNC_snprintf_btf:
1054                 return &bpf_snprintf_btf_proto;
1055         case BPF_FUNC_per_cpu_ptr:
1056                 return &bpf_per_cpu_ptr_proto;
1057         case BPF_FUNC_this_cpu_ptr:
1058                 return &bpf_this_cpu_ptr_proto;
1059         case BPF_FUNC_task_storage_get:
1060                 return &bpf_task_storage_get_proto;
1061         case BPF_FUNC_task_storage_delete:
1062                 return &bpf_task_storage_delete_proto;
1063         case BPF_FUNC_for_each_map_elem:
1064                 return &bpf_for_each_map_elem_proto;
1065         case BPF_FUNC_snprintf:
1066                 return &bpf_snprintf_proto;
1067         default:
1068                 return NULL;
1069         }
1070 }
1071
1072 static const struct bpf_func_proto *
1073 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1074 {
1075         switch (func_id) {
1076         case BPF_FUNC_perf_event_output:
1077                 return &bpf_perf_event_output_proto;
1078         case BPF_FUNC_get_stackid:
1079                 return &bpf_get_stackid_proto;
1080         case BPF_FUNC_get_stack:
1081                 return &bpf_get_stack_proto;
1082 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1083         case BPF_FUNC_override_return:
1084                 return &bpf_override_return_proto;
1085 #endif
1086         default:
1087                 return bpf_tracing_func_proto(func_id, prog);
1088         }
1089 }
1090
1091 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1092 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1093                                         const struct bpf_prog *prog,
1094                                         struct bpf_insn_access_aux *info)
1095 {
1096         if (off < 0 || off >= sizeof(struct pt_regs))
1097                 return false;
1098         if (type != BPF_READ)
1099                 return false;
1100         if (off % size != 0)
1101                 return false;
1102         /*
1103          * On 32-bit kernels this also ensures that an 8-byte (BPF_DW)
1104          * access to the last 4-byte member is disallowed.
1105          */
1106         if (off + size > sizeof(struct pt_regs))
1107                 return false;
1108
1109         return true;
1110 }
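
/*
 * Illustrative sketch, not part of this file: the check above means a
 * kprobe program's context is simply a read-only view of struct pt_regs,
 * so arguments are fetched through arch-specific register accessors such
 * as libbpf's PT_REGS_PARMn()/BPF_KPROBE() wrappers:
 *
 *	SEC("kprobe/kfree_skb")
 *	int BPF_KPROBE(on_kfree_skb, struct sk_buff *skb)
 *	{
 *		// 'skb' was extracted from PT_REGS_PARM1(ctx); reading its
 *		// fields still requires bpf_probe_read_kernel() or CO-RE.
 *		return 0;
 *	}
 */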
1111
1112 const struct bpf_verifier_ops kprobe_verifier_ops = {
1113         .get_func_proto  = kprobe_prog_func_proto,
1114         .is_valid_access = kprobe_prog_is_valid_access,
1115 };
1116
1117 const struct bpf_prog_ops kprobe_prog_ops = {
1118 };
1119
1120 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1121            u64, flags, void *, data, u64, size)
1122 {
1123         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1124
1125         /*
1126          * r1 points to the perf tracepoint buffer; its first 8 bytes are hidden
1127          * from the BPF program and contain a pointer to 'struct pt_regs'. Fetch
1128          * it from there and call the same bpf_perf_event_output() helper inline.
1129          */
1130         return ____bpf_perf_event_output(regs, map, flags, data, size);
1131 }
1132
1133 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1134         .func           = bpf_perf_event_output_tp,
1135         .gpl_only       = true,
1136         .ret_type       = RET_INTEGER,
1137         .arg1_type      = ARG_PTR_TO_CTX,
1138         .arg2_type      = ARG_CONST_MAP_PTR,
1139         .arg3_type      = ARG_ANYTHING,
1140         .arg4_type      = ARG_PTR_TO_MEM,
1141         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
1142 };
1143
1144 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1145            u64, flags)
1146 {
1147         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1148
1149         /*
1150          * Same comment as in bpf_perf_event_output_tp(), except that this time
1151          * the other helper's function body cannot be inlined because it is
1152          * external, so we need to call the raw helper function instead.
1153          */
1154         return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1155                                flags, 0, 0);
1156 }
1157
1158 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1159         .func           = bpf_get_stackid_tp,
1160         .gpl_only       = true,
1161         .ret_type       = RET_INTEGER,
1162         .arg1_type      = ARG_PTR_TO_CTX,
1163         .arg2_type      = ARG_CONST_MAP_PTR,
1164         .arg3_type      = ARG_ANYTHING,
1165 };
1166
1167 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1168            u64, flags)
1169 {
1170         struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1171
1172         return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1173                              (unsigned long) size, flags, 0);
1174 }
1175
1176 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1177         .func           = bpf_get_stack_tp,
1178         .gpl_only       = true,
1179         .ret_type       = RET_INTEGER,
1180         .arg1_type      = ARG_PTR_TO_CTX,
1181         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1182         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1183         .arg4_type      = ARG_ANYTHING,
1184 };
1185
1186 static const struct bpf_func_proto *
1187 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1188 {
1189         switch (func_id) {
1190         case BPF_FUNC_perf_event_output:
1191                 return &bpf_perf_event_output_proto_tp;
1192         case BPF_FUNC_get_stackid:
1193                 return &bpf_get_stackid_proto_tp;
1194         case BPF_FUNC_get_stack:
1195                 return &bpf_get_stack_proto_tp;
1196         default:
1197                 return bpf_tracing_func_proto(func_id, prog);
1198         }
1199 }
1200
1201 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1202                                     const struct bpf_prog *prog,
1203                                     struct bpf_insn_access_aux *info)
1204 {
1205         if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1206                 return false;
1207         if (type != BPF_READ)
1208                 return false;
1209         if (off % size != 0)
1210                 return false;
1211
1212         BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1213         return true;
1214 }
1215
1216 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1217         .get_func_proto  = tp_prog_func_proto,
1218         .is_valid_access = tp_prog_is_valid_access,
1219 };
1220
1221 const struct bpf_prog_ops tracepoint_prog_ops = {
1222 };
1223
1224 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1225            struct bpf_perf_event_value *, buf, u32, size)
1226 {
1227         int err = -EINVAL;
1228
1229         if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1230                 goto clear;
1231         err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1232                                     &buf->running);
1233         if (unlikely(err))
1234                 goto clear;
1235         return 0;
1236 clear:
1237         memset(buf, 0, size);
1238         return err;
1239 }
1240
1241 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1242          .func           = bpf_perf_prog_read_value,
1243          .gpl_only       = true,
1244          .ret_type       = RET_INTEGER,
1245          .arg1_type      = ARG_PTR_TO_CTX,
1246          .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1247          .arg3_type      = ARG_CONST_SIZE,
1248 };
1249
1250 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1251            void *, buf, u32, size, u64, flags)
1252 {
1253 #ifndef CONFIG_X86
1254         return -ENOENT;
1255 #else
1256         static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1257         struct perf_branch_stack *br_stack = ctx->data->br_stack;
1258         u32 to_copy;
1259
1260         if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1261                 return -EINVAL;
1262
1263         if (unlikely(!br_stack))
1264                 return -EINVAL;
1265
1266         if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1267                 return br_stack->nr * br_entry_size;
1268
1269         if (!buf || (size % br_entry_size != 0))
1270                 return -EINVAL;
1271
1272         to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1273         memcpy(buf, br_stack->entries, to_copy);
1274
1275         return to_copy;
1276 #endif
1277 }
1278
1279 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1280         .func           = bpf_read_branch_records,
1281         .gpl_only       = true,
1282         .ret_type       = RET_INTEGER,
1283         .arg1_type      = ARG_PTR_TO_CTX,
1284         .arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1285         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1286         .arg4_type      = ARG_ANYTHING,
1287 };
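
/*
 * Illustrative sketch, not part of this file: a perf_event BPF program
 * querying and copying branch records. User space must have opened the
 * perf event with PERF_SAMPLE_BRANCH_STACK for any data to be present.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		struct perf_branch_entry entries[16] = {};
 *		long sz, copied;
 *
 *		// With BPF_F_GET_BRANCH_RECORDS_SIZE only the total size
 *		// (in bytes) of the available records is returned.
 *		sz = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *		copied = bpf_read_branch_records(ctx, entries,
 *						 sizeof(entries), 0);
 *		return 0;
 *	}
 */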
1288
1289 static const struct bpf_func_proto *
1290 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1291 {
1292         switch (func_id) {
1293         case BPF_FUNC_perf_event_output:
1294                 return &bpf_perf_event_output_proto_tp;
1295         case BPF_FUNC_get_stackid:
1296                 return &bpf_get_stackid_proto_pe;
1297         case BPF_FUNC_get_stack:
1298                 return &bpf_get_stack_proto_pe;
1299         case BPF_FUNC_perf_prog_read_value:
1300                 return &bpf_perf_prog_read_value_proto;
1301         case BPF_FUNC_read_branch_records:
1302                 return &bpf_read_branch_records_proto;
1303         default:
1304                 return bpf_tracing_func_proto(func_id, prog);
1305         }
1306 }
1307
1308 /*
1309  * bpf_raw_tp_regs is kept separate from the bpf_pt_regs used by skb/xdp
1310  * to avoid a potential recursive-reuse issue when/if tracepoints are added
1311  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1312  *
1313  * Since raw tracepoints run regardless of bpf_prog_active, concurrent usage
1314  * in normal, irq, and nmi context must be supported.
1315  */
1316 struct bpf_raw_tp_regs {
1317         struct pt_regs regs[3];
1318 };
1319 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1320 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1321 static struct pt_regs *get_bpf_raw_tp_regs(void)
1322 {
1323         struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1324         int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1325
1326         if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1327                 this_cpu_dec(bpf_raw_tp_nest_level);
1328                 return ERR_PTR(-EBUSY);
1329         }
1330
1331         return &tp_regs->regs[nest_level - 1];
1332 }
1333
1334 static void put_bpf_raw_tp_regs(void)
1335 {
1336         this_cpu_dec(bpf_raw_tp_nest_level);
1337 }
1338
1339 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1340            struct bpf_map *, map, u64, flags, void *, data, u64, size)
1341 {
1342         struct pt_regs *regs = get_bpf_raw_tp_regs();
1343         int ret;
1344
1345         if (IS_ERR(regs))
1346                 return PTR_ERR(regs);
1347
1348         perf_fetch_caller_regs(regs);
1349         ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1350
1351         put_bpf_raw_tp_regs();
1352         return ret;
1353 }
1354
1355 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1356         .func           = bpf_perf_event_output_raw_tp,
1357         .gpl_only       = true,
1358         .ret_type       = RET_INTEGER,
1359         .arg1_type      = ARG_PTR_TO_CTX,
1360         .arg2_type      = ARG_CONST_MAP_PTR,
1361         .arg3_type      = ARG_ANYTHING,
1362         .arg4_type      = ARG_PTR_TO_MEM,
1363         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
1364 };
1365
1366 extern const struct bpf_func_proto bpf_skb_output_proto;
1367 extern const struct bpf_func_proto bpf_xdp_output_proto;
1368
1369 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1370            struct bpf_map *, map, u64, flags)
1371 {
1372         struct pt_regs *regs = get_bpf_raw_tp_regs();
1373         int ret;
1374
1375         if (IS_ERR(regs))
1376                 return PTR_ERR(regs);
1377
1378         perf_fetch_caller_regs(regs);
1379         /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1380         ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1381                               flags, 0, 0);
1382         put_bpf_raw_tp_regs();
1383         return ret;
1384 }
1385
1386 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1387         .func           = bpf_get_stackid_raw_tp,
1388         .gpl_only       = true,
1389         .ret_type       = RET_INTEGER,
1390         .arg1_type      = ARG_PTR_TO_CTX,
1391         .arg2_type      = ARG_CONST_MAP_PTR,
1392         .arg3_type      = ARG_ANYTHING,
1393 };
1394
1395 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1396            void *, buf, u32, size, u64, flags)
1397 {
1398         struct pt_regs *regs = get_bpf_raw_tp_regs();
1399         int ret;
1400
1401         if (IS_ERR(regs))
1402                 return PTR_ERR(regs);
1403
1404         perf_fetch_caller_regs(regs);
1405         ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1406                             (unsigned long) size, flags, 0);
1407         put_bpf_raw_tp_regs();
1408         return ret;
1409 }
1410
1411 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1412         .func           = bpf_get_stack_raw_tp,
1413         .gpl_only       = true,
1414         .ret_type       = RET_INTEGER,
1415         .arg1_type      = ARG_PTR_TO_CTX,
1416         .arg2_type      = ARG_PTR_TO_MEM,
1417         .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1418         .arg4_type      = ARG_ANYTHING,
1419 };
1420
1421 static const struct bpf_func_proto *
1422 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1423 {
1424         switch (func_id) {
1425         case BPF_FUNC_perf_event_output:
1426                 return &bpf_perf_event_output_proto_raw_tp;
1427         case BPF_FUNC_get_stackid:
1428                 return &bpf_get_stackid_proto_raw_tp;
1429         case BPF_FUNC_get_stack:
1430                 return &bpf_get_stack_proto_raw_tp;
1431         default:
1432                 return bpf_tracing_func_proto(func_id, prog);
1433         }
1434 }
1435
1436 const struct bpf_func_proto *
1437 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1438 {
1439         switch (func_id) {
1440 #ifdef CONFIG_NET
1441         case BPF_FUNC_skb_output:
1442                 return &bpf_skb_output_proto;
1443         case BPF_FUNC_xdp_output:
1444                 return &bpf_xdp_output_proto;
1445         case BPF_FUNC_skc_to_tcp6_sock:
1446                 return &bpf_skc_to_tcp6_sock_proto;
1447         case BPF_FUNC_skc_to_tcp_sock:
1448                 return &bpf_skc_to_tcp_sock_proto;
1449         case BPF_FUNC_skc_to_tcp_timewait_sock:
1450                 return &bpf_skc_to_tcp_timewait_sock_proto;
1451         case BPF_FUNC_skc_to_tcp_request_sock:
1452                 return &bpf_skc_to_tcp_request_sock_proto;
1453         case BPF_FUNC_skc_to_udp6_sock:
1454                 return &bpf_skc_to_udp6_sock_proto;
1455         case BPF_FUNC_sk_storage_get:
1456                 return &bpf_sk_storage_get_tracing_proto;
1457         case BPF_FUNC_sk_storage_delete:
1458                 return &bpf_sk_storage_delete_tracing_proto;
1459         case BPF_FUNC_sock_from_file:
1460                 return &bpf_sock_from_file_proto;
1461         case BPF_FUNC_get_socket_cookie:
1462                 return &bpf_get_socket_ptr_cookie_proto;
1463 #endif
1464         case BPF_FUNC_seq_printf:
1465                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1466                        &bpf_seq_printf_proto :
1467                        NULL;
1468         case BPF_FUNC_seq_write:
1469                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1470                        &bpf_seq_write_proto :
1471                        NULL;
1472         case BPF_FUNC_seq_printf_btf:
1473                 return prog->expected_attach_type == BPF_TRACE_ITER ?
1474                        &bpf_seq_printf_btf_proto :
1475                        NULL;
1476         case BPF_FUNC_d_path:
1477                 return &bpf_d_path_proto;
1478         default:
1479                 return raw_tp_prog_func_proto(func_id, prog);
1480         }
1481 }
1482
1483 static bool raw_tp_prog_is_valid_access(int off, int size,
1484                                         enum bpf_access_type type,
1485                                         const struct bpf_prog *prog,
1486                                         struct bpf_insn_access_aux *info)
1487 {
1488         if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1489                 return false;
1490         if (type != BPF_READ)
1491                 return false;
1492         if (off % size != 0)
1493                 return false;
1494         return true;
1495 }
1496
1497 static bool tracing_prog_is_valid_access(int off, int size,
1498                                          enum bpf_access_type type,
1499                                          const struct bpf_prog *prog,
1500                                          struct bpf_insn_access_aux *info)
1501 {
1502         if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1503                 return false;
1504         if (type != BPF_READ)
1505                 return false;
1506         if (off % size != 0)
1507                 return false;
1508         return btf_ctx_access(off, size, type, prog, info);
1509 }
1510
1511 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1512                                      const union bpf_attr *kattr,
1513                                      union bpf_attr __user *uattr)
1514 {
1515         return -ENOTSUPP;
1516 }
1517
1518 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1519         .get_func_proto  = raw_tp_prog_func_proto,
1520         .is_valid_access = raw_tp_prog_is_valid_access,
1521 };
1522
1523 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1524 #ifdef CONFIG_NET
1525         .test_run = bpf_prog_test_run_raw_tp,
1526 #endif
1527 };
1528
1529 const struct bpf_verifier_ops tracing_verifier_ops = {
1530         .get_func_proto  = tracing_prog_func_proto,
1531         .is_valid_access = tracing_prog_is_valid_access,
1532 };
1533
1534 const struct bpf_prog_ops tracing_prog_ops = {
1535         .test_run = bpf_prog_test_run_tracing,
1536 };
1537
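/*
 * For writable tracepoints, ctx offset 0 holds the pointer to the
 * writable buffer.  It may only be read as a full u64 and is marked
 * PTR_TO_TP_BUFFER so the verifier can track accesses through it, which
 * are checked against the tracepoint's writable_size at attach time.
 */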
1538 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1539                                                  enum bpf_access_type type,
1540                                                  const struct bpf_prog *prog,
1541                                                  struct bpf_insn_access_aux *info)
1542 {
1543         if (off == 0) {
1544                 if (size != sizeof(u64) || type != BPF_READ)
1545                         return false;
1546                 info->reg_type = PTR_TO_TP_BUFFER;
1547         }
1548         return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1549 }
1550
1551 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1552         .get_func_proto  = raw_tp_prog_func_proto,
1553         .is_valid_access = raw_tp_writable_prog_is_valid_access,
1554 };
1555
1556 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1557 };
1558
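/*
 * Perf event programs read their context as struct bpf_perf_event_data.
 * sample_period and addr are u64 fields that also allow narrow loads;
 * every other offset maps onto the saved pt_regs and must be read in
 * long-sized chunks.  The misaligned case permits 8-byte reads at a
 * 4-byte offset on 32-bit kernels.
 */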
1559 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1560                                     const struct bpf_prog *prog,
1561                                     struct bpf_insn_access_aux *info)
1562 {
1563         const int size_u64 = sizeof(u64);
1564
1565         if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1566                 return false;
1567         if (type != BPF_READ)
1568                 return false;
1569         if (off % size != 0) {
1570                 if (sizeof(unsigned long) != 4)
1571                         return false;
1572                 if (size != 8)
1573                         return false;
1574                 if (off % size != 4)
1575                         return false;
1576         }
1577
1578         switch (off) {
1579         case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1580                 bpf_ctx_record_field_size(info, size_u64);
1581                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1582                         return false;
1583                 break;
1584         case bpf_ctx_range(struct bpf_perf_event_data, addr):
1585                 bpf_ctx_record_field_size(info, size_u64);
1586                 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1587                         return false;
1588                 break;
1589         default:
1590                 if (size != sizeof(long))
1591                         return false;
1592         }
1593
1594         return true;
1595 }
1596
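/*
 * Rewrite accesses to struct bpf_perf_event_data into loads from the
 * kernel-side struct bpf_perf_event_data_kern: sample_period and addr go
 * through ctx->data (struct perf_sample_data), all other offsets through
 * ctx->regs.
 */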
1597 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1598                                       const struct bpf_insn *si,
1599                                       struct bpf_insn *insn_buf,
1600                                       struct bpf_prog *prog, u32 *target_size)
1601 {
1602         struct bpf_insn *insn = insn_buf;
1603
1604         switch (si->off) {
1605         case offsetof(struct bpf_perf_event_data, sample_period):
1606                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1607                                                        data), si->dst_reg, si->src_reg,
1608                                       offsetof(struct bpf_perf_event_data_kern, data));
1609                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1610                                       bpf_target_off(struct perf_sample_data, period, 8,
1611                                                      target_size));
1612                 break;
1613         case offsetof(struct bpf_perf_event_data, addr):
1614                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1615                                                        data), si->dst_reg, si->src_reg,
1616                                       offsetof(struct bpf_perf_event_data_kern, data));
1617                 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1618                                       bpf_target_off(struct perf_sample_data, addr, 8,
1619                                                      target_size));
1620                 break;
1621         default:
1622                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1623                                                        regs), si->dst_reg, si->src_reg,
1624                                       offsetof(struct bpf_perf_event_data_kern, regs));
1625                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1626                                       si->off);
1627                 break;
1628         }
1629
1630         return insn - insn_buf;
1631 }
1632
1633 const struct bpf_verifier_ops perf_event_verifier_ops = {
1634         .get_func_proto         = pe_prog_func_proto,
1635         .is_valid_access        = pe_prog_is_valid_access,
1636         .convert_ctx_access     = pe_prog_convert_ctx_access,
1637 };
1638
1639 const struct bpf_prog_ops perf_event_prog_ops = {
1640 };
1641
1642 static DEFINE_MUTEX(bpf_event_mutex);
1643
1644 #define BPF_TRACE_MAX_PROGS 64
1645
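/**
 * perf_event_attach_bpf_prog - attach a BPF program to a perf event
 * @event: perf event (kprobe, uprobe or tracepoint)
 * @prog: BPF program to run when the event fires
 *
 * Appends @prog to the tp_event's RCU-managed prog_array, which is
 * limited to BPF_TRACE_MAX_PROGS entries.  Fails with -EEXIST if the
 * event already has a program attached.
 */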
1646 int perf_event_attach_bpf_prog(struct perf_event *event,
1647                                struct bpf_prog *prog)
1648 {
1649         struct bpf_prog_array *old_array;
1650         struct bpf_prog_array *new_array;
1651         int ret = -EEXIST;
1652
1653         /*
1654          * Kprobe override only works if the probe is on the function entry,
1655          * and only if the probed function is on the error-injection opt-in list.
1656          */
1657         if (prog->kprobe_override &&
1658             (!trace_kprobe_on_func_entry(event->tp_event) ||
1659              !trace_kprobe_error_injectable(event->tp_event)))
1660                 return -EINVAL;
1661
1662         mutex_lock(&bpf_event_mutex);
1663
1664         if (event->prog)
1665                 goto unlock;
1666
1667         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1668         if (old_array &&
1669             bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1670                 ret = -E2BIG;
1671                 goto unlock;
1672         }
1673
1674         ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1675         if (ret < 0)
1676                 goto unlock;
1677
1678         /* publish the new array on event->tp_event and record the prog on the event */
1679         event->prog = prog;
1680         rcu_assign_pointer(event->tp_event->prog_array, new_array);
1681         bpf_prog_array_free(old_array);
1682
1683 unlock:
1684         mutex_unlock(&bpf_event_mutex);
1685         return ret;
1686 }
1687
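/**
 * perf_event_detach_bpf_prog - detach the BPF program from a perf event
 * @event: perf event the program was attached to
 *
 * Publishes a copy of the tp_event's prog_array without @event->prog and
 * drops the program reference.  If the copy fails, the entry is replaced
 * in place via bpf_prog_array_delete_safe() instead.
 */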
1688 void perf_event_detach_bpf_prog(struct perf_event *event)
1689 {
1690         struct bpf_prog_array *old_array;
1691         struct bpf_prog_array *new_array;
1692         int ret;
1693
1694         mutex_lock(&bpf_event_mutex);
1695
1696         if (!event->prog)
1697                 goto unlock;
1698
1699         old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1700         ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1701         if (ret == -ENOENT)
1702                 goto unlock;
1703         if (ret < 0) {
1704                 bpf_prog_array_delete_safe(old_array, event->prog);
1705         } else {
1706                 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1707                 bpf_prog_array_free(old_array);
1708         }
1709
1710         bpf_prog_put(event->prog);
1711         event->prog = NULL;
1712
1713 unlock:
1714         mutex_unlock(&bpf_event_mutex);
1715 }
1716
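/*
 * Copy the ids of the BPF programs attached to a tracepoint perf event
 * back to user space (backend of the PERF_EVENT_IOC_QUERY_BPF ioctl).
 * At most query.ids_len ids are copied, capped at BPF_TRACE_MAX_PROGS,
 * and the total count is reported in uquery->prog_cnt.
 */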
1717 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1718 {
1719         struct perf_event_query_bpf __user *uquery = info;
1720         struct perf_event_query_bpf query = {};
1721         struct bpf_prog_array *progs;
1722         u32 *ids, prog_cnt, ids_len;
1723         int ret;
1724
1725         if (!perfmon_capable())
1726                 return -EPERM;
1727         if (event->attr.type != PERF_TYPE_TRACEPOINT)
1728                 return -EINVAL;
1729         if (copy_from_user(&query, uquery, sizeof(query)))
1730                 return -EFAULT;
1731
1732         ids_len = query.ids_len;
1733         if (ids_len > BPF_TRACE_MAX_PROGS)
1734                 return -E2BIG;
1735         ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1736         if (!ids)
1737                 return -ENOMEM;
1738         /*
1739          * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1740          * is required when the user only wants to check uquery->prog_cnt.
1741          * There is no need to check for it here since that case is handled
1742          * gracefully in bpf_prog_array_copy_info().
1743          */
1744
1745         mutex_lock(&bpf_event_mutex);
1746         progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1747         ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1748         mutex_unlock(&bpf_event_mutex);
1749
1750         if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1751             copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1752                 ret = -EFAULT;
1753
1754         kfree(ids);
1755         return ret;
1756 }
1757
1758 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1759 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1760
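/*
 * Look up a raw tracepoint by name: scan the built-in __bpf_raw_tp
 * section first, then fall back to tracepoints provided by modules
 * (which takes a reference on the owning module).
 */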
1761 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1762 {
1763         struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1764
1765         for (; btp < __stop__bpf_raw_tp; btp++) {
1766                 if (!strcmp(btp->tp->name, name))
1767                         return btp;
1768         }
1769
1770         return bpf_get_raw_tracepoint_module(name);
1771 }
1772
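/*
 * Drop the module reference taken by bpf_get_raw_tracepoint().  For
 * built-in tracepoints __module_address() returns NULL and module_put()
 * is a no-op.
 */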
1773 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1774 {
1775         struct module *mod;
1776
1777         preempt_disable();
1778         mod = __module_address((unsigned long)btp);
1779         module_put(mod);
1780         preempt_enable();
1781 }
1782
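/*
 * Common body of bpf_trace_run1() .. bpf_trace_run12(): run the program
 * with the u64 argument array as its context, under RCU and in
 * non-sleepable context.
 */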
1783 static __always_inline
1784 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1785 {
1786         cant_sleep();
1787         rcu_read_lock();
1788         (void) BPF_PROG_RUN(prog, args);
1789         rcu_read_unlock();
1790 }
1791
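/*
 * Preprocessor machinery for stamping out bpf_trace_run1() ..
 * bpf_trace_run12().  REPEAT(x, FN, DL, ...) applies FN to the first x
 * indices, separated by the delimiter DL: SARG builds the "u64 argN"
 * parameter list and COPY packs those arguments into the on-stack array
 * passed to __bpf_trace_run().
 */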
1792 #define UNPACK(...)                     __VA_ARGS__
1793 #define REPEAT_1(FN, DL, X, ...)        FN(X)
1794 #define REPEAT_2(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1795 #define REPEAT_3(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1796 #define REPEAT_4(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1797 #define REPEAT_5(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1798 #define REPEAT_6(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1799 #define REPEAT_7(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1800 #define REPEAT_8(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1801 #define REPEAT_9(FN, DL, X, ...)        FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1802 #define REPEAT_10(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1803 #define REPEAT_11(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1804 #define REPEAT_12(FN, DL, X, ...)       FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1805 #define REPEAT(X, FN, DL, ...)          REPEAT_##X(FN, DL, __VA_ARGS__)
1806
1807 #define SARG(X)         u64 arg##X
1808 #define COPY(X)         args[X] = arg##X
1809
1810 #define __DL_COM        (,)
1811 #define __DL_SEM        (;)
1812
1813 #define __SEQ_0_11      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1814
1815 #define BPF_TRACE_DEFN_x(x)                                             \
1816         void bpf_trace_run##x(struct bpf_prog *prog,                    \
1817                               REPEAT(x, SARG, __DL_COM, __SEQ_0_11))    \
1818         {                                                               \
1819                 u64 args[x];                                            \
1820                 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);                  \
1821                 __bpf_trace_run(prog, args);                            \
1822         }                                                               \
1823         EXPORT_SYMBOL_GPL(bpf_trace_run##x)
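/*
 * For illustration, BPF_TRACE_DEFN_x(2) expands roughly to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */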
1824 BPF_TRACE_DEFN_x(1);
1825 BPF_TRACE_DEFN_x(2);
1826 BPF_TRACE_DEFN_x(3);
1827 BPF_TRACE_DEFN_x(4);
1828 BPF_TRACE_DEFN_x(5);
1829 BPF_TRACE_DEFN_x(6);
1830 BPF_TRACE_DEFN_x(7);
1831 BPF_TRACE_DEFN_x(8);
1832 BPF_TRACE_DEFN_x(9);
1833 BPF_TRACE_DEFN_x(10);
1834 BPF_TRACE_DEFN_x(11);
1835 BPF_TRACE_DEFN_x(12);
1836
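/*
 * Validate that the program fits this tracepoint: it must not read ctx
 * beyond the tracepoint's argument count nor write beyond its writable
 * size.  If it fits, register btp->bpf_func as the tracepoint probe with
 * the program as its data.
 */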
1837 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1838 {
1839         struct tracepoint *tp = btp->tp;
1840
1841         /*
1842          * Check that the program doesn't access arguments beyond what's
1843          * available in this tracepoint.
1844          */
1845         if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1846                 return -EINVAL;
1847
1848         if (prog->aux->max_tp_access > btp->writable_size)
1849                 return -EINVAL;
1850
1851         return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1852 }
1853
1854 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1855 {
1856         return __bpf_probe_register(btp, prog);
1857 }
1858
1859 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1860 {
1861         return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1862 }
1863
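/*
 * Report what the BPF program attached to @event is bound to: the prog
 * id plus, depending on the event, the tracepoint name or the
 * kprobe/uprobe symbol or file with its offset and address.  Used to
 * service the BPF_TASK_FD_QUERY command.
 */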
1864 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1865                             u32 *fd_type, const char **buf,
1866                             u64 *probe_offset, u64 *probe_addr)
1867 {
1868         bool is_tracepoint, is_syscall_tp;
1869         struct bpf_prog *prog;
1870         int flags, err = 0;
1871
1872         prog = event->prog;
1873         if (!prog)
1874                 return -ENOENT;
1875
1876         /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1877         if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1878                 return -EOPNOTSUPP;
1879
1880         *prog_id = prog->aux->id;
1881         flags = event->tp_event->flags;
1882         is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1883         is_syscall_tp = is_syscall_trace_event(event->tp_event);
1884
1885         if (is_tracepoint || is_syscall_tp) {
1886                 *buf = is_tracepoint ? event->tp_event->tp->name
1887                                      : event->tp_event->name;
1888                 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1889                 *probe_offset = 0x0;
1890                 *probe_addr = 0x0;
1891         } else {
1892                 /* kprobe/uprobe */
1893                 err = -EOPNOTSUPP;
1894 #ifdef CONFIG_KPROBE_EVENTS
1895                 if (flags & TRACE_EVENT_FL_KPROBE)
1896                         err = bpf_get_kprobe_info(event, fd_type, buf,
1897                                                   probe_offset, probe_addr,
1898                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
1899 #endif
1900 #ifdef CONFIG_UPROBE_EVENTS
1901                 if (flags & TRACE_EVENT_FL_UPROBE)
1902                         err = bpf_get_uprobe_info(event, fd_type, buf,
1903                                                   probe_offset,
1904                                                   event->attr.type == PERF_TYPE_TRACEPOINT);
1905 #endif
1906         }
1907
1908         return err;
1909 }
1910
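/*
 * Set up the per-CPU irq_work items used by the bpf_send_signal()
 * helpers to deliver signals from contexts where it is not safe to do so
 * directly.
 */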
1911 static int __init send_signal_irq_work_init(void)
1912 {
1913         int cpu;
1914         struct send_signal_irq_work *work;
1915
1916         for_each_possible_cpu(cpu) {
1917                 work = per_cpu_ptr(&send_signal_work, cpu);
1918                 init_irq_work(&work->irq_work, do_bpf_send_signal);
1919         }
1920         return 0;
1921 }
1922
1923 subsys_initcall(send_signal_irq_work_init);
1924
1925 #ifdef CONFIG_MODULES
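/*
 * Track modules that carry raw tracepoints on bpf_trace_modules so that
 * bpf_get_raw_tracepoint_module() can find them: entries are added when
 * a module is coming up and removed when it goes away.
 */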
1926 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1927                             void *module)
1928 {
1929         struct bpf_trace_module *btm, *tmp;
1930         struct module *mod = module;
1931         int ret = 0;
1932
1933         if (mod->num_bpf_raw_events == 0 ||
1934             (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1935                 goto out;
1936
1937         mutex_lock(&bpf_module_mutex);
1938
1939         switch (op) {
1940         case MODULE_STATE_COMING:
1941                 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1942                 if (btm) {
1943                         btm->module = module;
1944                         list_add(&btm->list, &bpf_trace_modules);
1945                 } else {
1946                         ret = -ENOMEM;
1947                 }
1948                 break;
1949         case MODULE_STATE_GOING:
1950                 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1951                         if (btm->module == module) {
1952                                 list_del(&btm->list);
1953                                 kfree(btm);
1954                                 break;
1955                         }
1956                 }
1957                 break;
1958         }
1959
1960         mutex_unlock(&bpf_module_mutex);
1961
1962 out:
1963         return notifier_from_errno(ret);
1964 }
1965
1966 static struct notifier_block bpf_module_nb = {
1967         .notifier_call = bpf_event_notify,
1968 };
1969
1970 static int __init bpf_event_init(void)
1971 {
1972         register_module_notifier(&bpf_module_nb);
1973         return 0;
1974 }
1975
1976 fs_initcall(bpf_event_init);
1977 #endif /* CONFIG_MODULES */