1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
19 #include "trace_probe.h"
22 #define bpf_event_rcu_dereference(p) \
23 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
26 struct bpf_trace_module {
27 struct module *module;
28 struct list_head list;
31 static LIST_HEAD(bpf_trace_modules);
32 static DEFINE_MUTEX(bpf_module_mutex);
34 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
36 struct bpf_raw_event_map *btp, *ret = NULL;
37 struct bpf_trace_module *btm;
40 mutex_lock(&bpf_module_mutex);
41 list_for_each_entry(btm, &bpf_trace_modules, list) {
42 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 btp = &btm->module->bpf_raw_events[i];
44 if (!strcmp(btp->tp->name, name)) {
45 if (try_module_get(btm->module))
52 mutex_unlock(&bpf_module_mutex);
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
60 #endif /* CONFIG_MODULES */
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
66 * trace_call_bpf - invoke BPF program
67 * @call: tracepoint event
68 * @ctx: opaque context pointer
70 * kprobe handlers execute BPF programs via this helper.
71 * Can be used from static tracepoints in the future.
73 * Return: BPF programs always return an integer, which the kprobe handler interprets as follows:
75 * 0 - return from kprobe (event is filtered out)
76 * 1 - store kprobe event into ring buffer
77 * Other values are reserved and currently alias to 1
79 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
83 if (in_nmi()) /* not supported yet */
88 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
90 * since some bpf program is already running on this cpu,
91 * don't call into another bpf program (same or different)
92 * and don't send kprobe event into ring-buffer, so return zero here
100 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
101 * to all call sites, we did a bpf_prog_array_valid() there to check
102 * whether call->prog_array is empty or not, which is
103 * a heuristic to speed up execution.
105 * If the prog_array fetched by bpf_prog_array_valid() was
106 * non-NULL, we go into trace_call_bpf() and do the actual
107 * proper rcu_dereference() under RCU lock.
108 * If it turns out that prog_array is NULL then we bail out.
109 * Conversely, if the pointer fetched by bpf_prog_array_valid()
110 * was NULL, we skip the prog_array at the risk of missing
111 * out on events when it was updated between that check and the
112 * rcu_dereference(), which is an accepted risk.
114 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
117 __this_cpu_dec(bpf_prog_active);
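/*
 * Illustrative sketch, not part of the original file: a perf kprobe handler
 * is expected to consume the return value roughly the way kprobe_perf_func()
 * in kernel/trace/trace_kprobe.c does:
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		(0 means the event was filtered out)
 *
 * i.e. a zero return suppresses the perf event, non-zero lets it be emitted.
 */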
122 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
123 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
125 regs_set_return_value(regs, rc);
126 override_function_with_return(regs);
130 static const struct bpf_func_proto bpf_override_return_proto = {
131 .func = bpf_override_return,
133 .ret_type = RET_INTEGER,
134 .arg1_type = ARG_PTR_TO_CTX,
135 .arg2_type = ARG_ANYTHING,
139 static __always_inline int
140 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
144 ret = probe_user_read(dst, unsafe_ptr, size);
145 if (unlikely(ret < 0))
146 memset(dst, 0, size);
150 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
151 const void __user *, unsafe_ptr)
153 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
156 const struct bpf_func_proto bpf_probe_read_user_proto = {
157 .func = bpf_probe_read_user,
159 .ret_type = RET_INTEGER,
160 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
161 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
162 .arg3_type = ARG_ANYTHING,
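/*
 * Minimal BPF-program-side sketch (assumption: a libbpf-built kprobe program;
 * the probed symbol and argument layout are illustrative, only the helper
 * call itself is taken from this file):
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		char buf[64];
 *
 *		if (bpf_probe_read_user(buf, sizeof(buf), uptr) < 0)
 *			return 0;	(buf was zeroed by the helper)
 *		return 0;
 *	}
 */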
165 static __always_inline int
166 bpf_probe_read_user_str_common(void *dst, u32 size,
167 const void __user *unsafe_ptr)
171 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
172 if (unlikely(ret < 0))
173 memset(dst, 0, size);
177 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
178 const void __user *, unsafe_ptr)
180 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
183 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
184 .func = bpf_probe_read_user_str,
186 .ret_type = RET_INTEGER,
187 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
188 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
189 .arg3_type = ARG_ANYTHING,
192 static __always_inline int
193 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
195 int ret = security_locked_down(LOCKDOWN_BPF_READ);
197 if (unlikely(ret < 0))
199 ret = probe_kernel_read(dst, unsafe_ptr, size);
200 if (unlikely(ret < 0))
204 memset(dst, 0, size);
208 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
209 const void *, unsafe_ptr)
211 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
214 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
215 .func = bpf_probe_read_kernel,
217 .ret_type = RET_INTEGER,
218 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
219 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
220 .arg3_type = ARG_ANYTHING,
223 static __always_inline int
224 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
226 int ret = security_locked_down(LOCKDOWN_BPF_READ);
228 if (unlikely(ret < 0))
232 * The strncpy_from_kernel_nofault() call will likely not fill the
233 * entire buffer, but that's okay in this circumstance as we're probing
234 * arbitrary memory anyway similar to bpf_probe_read_*() and might
235 * as well probe the stack. Thus, memory is explicitly cleared
236 * only in error case, so that improper users ignoring return
237 * code altogether don't copy garbage; otherwise length of string
238 * is returned that can be used for bpf_perf_event_output() et al.
240 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
241 if (unlikely(ret < 0))
246 memset(dst, 0, size);
250 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
251 const void *, unsafe_ptr)
253 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
256 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
257 .func = bpf_probe_read_kernel_str,
259 .ret_type = RET_INTEGER,
260 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
261 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
262 .arg3_type = ARG_ANYTHING,
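/*
 * Usage sketch (assumption: a tracing program with a hypothetical
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY named "events"): as the comment above notes,
 * the returned string length can be fed straight into bpf_perf_event_output():
 *
 *	const char *kptr;	(some kernel string pointer)
 *	char buf[64];
 *	long len;
 *
 *	len = bpf_probe_read_kernel_str(buf, sizeof(buf), kptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, buf, len);
 */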
265 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
266 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
267 const void *, unsafe_ptr)
269 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
270 return bpf_probe_read_user_common(dst, size,
271 (__force void __user *)unsafe_ptr);
273 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
276 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
277 .func = bpf_probe_read_compat,
279 .ret_type = RET_INTEGER,
280 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
281 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
282 .arg3_type = ARG_ANYTHING,
285 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
286 const void *, unsafe_ptr)
288 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
289 return bpf_probe_read_user_str_common(dst, size,
290 (__force void __user *)unsafe_ptr);
292 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
295 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
296 .func = bpf_probe_read_compat_str,
298 .ret_type = RET_INTEGER,
299 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
300 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
301 .arg3_type = ARG_ANYTHING,
303 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
305 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
309 * Ensure we're in user context which is safe for the helper to
310 * run. This helper has no business in a kthread.
312 * access_ok() should prevent writing to non-user memory, but in
313 * some situations (nommu, temporary switch, etc) access_ok() does
314 * not provide enough validation, hence the check on KERNEL_DS.
316 * nmi_uaccess_okay() ensures the probe is not run in an interim
317 * state, when the task or mm are switched. This is specifically
318 * required to prevent the use of temporary mm.
321 if (unlikely(in_interrupt() ||
322 current->flags & (PF_KTHREAD | PF_EXITING)))
324 if (unlikely(uaccess_kernel()))
326 if (unlikely(!nmi_uaccess_okay()))
329 return probe_user_write(unsafe_ptr, src, size);
332 static const struct bpf_func_proto bpf_probe_write_user_proto = {
333 .func = bpf_probe_write_user,
335 .ret_type = RET_INTEGER,
336 .arg1_type = ARG_ANYTHING,
337 .arg2_type = ARG_PTR_TO_MEM,
338 .arg3_type = ARG_CONST_SIZE,
341 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
343 if (!capable(CAP_SYS_ADMIN))
346 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
347 current->comm, task_pid_nr(current));
349 return &bpf_probe_write_user_proto;
352 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
355 void __user *user_ptr = (__force void __user *)unsafe_ptr;
361 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
362 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
363 strncpy_from_user_nofault(buf, user_ptr, bufsz);
369 strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
372 strncpy_from_user_nofault(buf, user_ptr, bufsz);
378 * Only limited trace_printk() conversion specifiers allowed:
379 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
381 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
382 u64, arg2, u64, arg3)
384 int i, mod[3] = {}, fmt_cnt = 0;
385 char buf[64], fmt_ptype;
386 void *unsafe_ptr = NULL;
387 bool str_seen = false;
390 * bpf_check()->check_func_arg()->check_stack_boundary()
391 * guarantees that fmt points to bpf program stack,
392 * fmt_size bytes of it were initialized and fmt_size > 0
394 if (fmt[--fmt_size] != 0)
397 /* check format string for allowed specifiers */
398 for (i = 0; i < fmt_size; i++) {
399 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
408 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
413 } else if (fmt[i] == 'p') {
415 if ((fmt[i + 1] == 'k' ||
416 fmt[i + 1] == 'u') &&
418 fmt_ptype = fmt[i + 1];
423 if (fmt[i + 1] == 'B') {
428 /* disallow any further format extensions */
429 if (fmt[i + 1] != 0 &&
430 !isspace(fmt[i + 1]) &&
431 !ispunct(fmt[i + 1]))
435 } else if (fmt[i] == 's') {
440 /* allow only one '%s' per fmt string */
444 if (fmt[i + 1] != 0 &&
445 !isspace(fmt[i + 1]) &&
446 !ispunct(fmt[i + 1]))
451 unsafe_ptr = (void *)(long)arg1;
455 unsafe_ptr = (void *)(long)arg2;
459 unsafe_ptr = (void *)(long)arg3;
464 bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
474 if (fmt[i] != 'i' && fmt[i] != 'd' &&
475 fmt[i] != 'u' && fmt[i] != 'x')
481 /* Horrid workaround for getting va_list handling working with different
482 * argument type combinations generically for 32 and 64 bit archs.
484 #define __BPF_TP_EMIT() __BPF_ARG3_TP()
485 #define __BPF_TP(...) \
486 __trace_printk(0 /* Fake ip */, \
489 #define __BPF_ARG1_TP(...) \
490 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
491 ? __BPF_TP(arg1, ##__VA_ARGS__) \
492 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
493 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
494 : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
496 #define __BPF_ARG2_TP(...) \
497 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
498 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
499 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
500 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
501 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
503 #define __BPF_ARG3_TP(...) \
504 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
505 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
506 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
507 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
508 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
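/*
 * Descriptive note (added): mod[n] encodes the argument width derived while
 * parsing the format string above (0 for int-sized specifiers, 1 for 'long'
 * and pointer/string conversions, 2 for 'long long'). The macros use that,
 * together with __BITS_PER_LONG, to cast arg1..arg3 to the width
 * __trace_printk() expects before emitting the fake-ip event.
 */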
510 return __BPF_TP_EMIT();
513 static const struct bpf_func_proto bpf_trace_printk_proto = {
514 .func = bpf_trace_printk,
516 .ret_type = RET_INTEGER,
517 .arg1_type = ARG_PTR_TO_MEM,
518 .arg2_type = ARG_CONST_SIZE,
521 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
524 * this program might be calling bpf_trace_printk,
525 * so allocate per-cpu printk buffers
527 trace_printk_init_buffers();
529 return &bpf_trace_printk_proto;
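/*
 * BPF-program-side usage sketch (assumption: libbpf-style program; only the
 * specifiers listed above are accepted, with at most three arguments and a
 * single %s per format string):
 *
 *	char comm[16];
 *	char fmt[] = "comm %s tgid %d\n";
 *
 *	bpf_get_current_comm(comm, sizeof(comm));
 *	bpf_trace_printk(fmt, sizeof(fmt), comm,
 *			 bpf_get_current_pid_tgid() >> 32);
 *
 * The output lands in the tracing trace_pipe via the fake-ip
 * __trace_printk() call above.
 */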
532 #define MAX_SEQ_PRINTF_VARARGS 12
533 #define MAX_SEQ_PRINTF_MAX_MEMCPY 6
534 #define MAX_SEQ_PRINTF_STR_LEN 128
536 struct bpf_seq_printf_buf {
537 char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
539 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
540 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
542 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
543 const void *, data, u32, data_len)
545 int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
546 int i, buf_used, copy_size, num_args;
547 u64 params[MAX_SEQ_PRINTF_VARARGS];
548 struct bpf_seq_printf_buf *bufs;
549 const u64 *args = data;
551 buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
552 if (WARN_ON_ONCE(buf_used > 1)) {
557 bufs = this_cpu_ptr(&bpf_seq_printf_buf);
560 * bpf_check()->check_func_arg()->check_stack_boundary()
561 * guarantees that fmt points to bpf program stack,
562 * fmt_size bytes of it were initialized and fmt_size > 0
564 if (fmt[--fmt_size] != 0)
570 for (i = 0; i < fmt_size; i++) {
572 if (fmt[i + 1] == '%')
574 else if (!data || !data_len)
579 num_args = data_len / 8;
581 /* check format string for allowed specifiers */
582 for (i = 0; i < fmt_size; i++) {
583 /* only printable ascii for now. */
584 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
592 if (fmt[i + 1] == '%') {
597 if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
602 if (fmt_cnt >= num_args) {
607 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
610 /* skip optional "[0 +-][num]" width formatting field */
611 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
614 if (fmt[i] >= '1' && fmt[i] <= '9') {
616 while (fmt[i] >= '0' && fmt[i] <= '9')
623 /* try our best to copy */
624 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
629 unsafe_ptr = (void *)(long)args[fmt_cnt];
630 err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
631 unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
633 bufs->buf[memcpy_cnt][0] = '\0';
634 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
642 if (fmt[i + 1] == 0 ||
646 /* just kernel pointers */
647 params[fmt_cnt] = args[fmt_cnt];
652 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
653 if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
657 if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
662 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
668 copy_size = (fmt[i + 2] == '4') ? 4 : 16;
670 err = probe_kernel_read(bufs->buf[memcpy_cnt],
671 (void *) (long) args[fmt_cnt],
674 memset(bufs->buf[memcpy_cnt], 0, copy_size);
675 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
689 if (fmt[i] != 'i' && fmt[i] != 'd' &&
690 fmt[i] != 'u' && fmt[i] != 'x' &&
696 params[fmt_cnt] = args[fmt_cnt];
700 /* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
701 * all of them to seq_printf().
703 seq_printf(m, fmt, params[0], params[1], params[2], params[3],
704 params[4], params[5], params[6], params[7], params[8],
705 params[9], params[10], params[11]);
707 err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
709 this_cpu_dec(bpf_seq_printf_buf_used);
713 static int bpf_seq_printf_btf_ids[5];
714 static const struct bpf_func_proto bpf_seq_printf_proto = {
715 .func = bpf_seq_printf,
717 .ret_type = RET_INTEGER,
718 .arg1_type = ARG_PTR_TO_BTF_ID,
719 .arg2_type = ARG_PTR_TO_MEM,
720 .arg3_type = ARG_CONST_SIZE,
721 .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
722 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
723 .btf_id = bpf_seq_printf_btf_ids,
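/*
 * Iterator-side usage sketch (assumption: a BPF_TRACE_ITER program such as an
 * "iter/task" program; the printed fields are illustrative):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 *
 * BPF_SEQ_PRINTF() is the convenience wrapper (from libbpf/the BPF selftests)
 * that packs the varargs into the data/data_len array consumed by
 * bpf_seq_printf() above.
 */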
726 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
728 return seq_write(m, data, len) ? -EOVERFLOW : 0;
731 static int bpf_seq_write_btf_ids[5];
732 static const struct bpf_func_proto bpf_seq_write_proto = {
733 .func = bpf_seq_write,
735 .ret_type = RET_INTEGER,
736 .arg1_type = ARG_PTR_TO_BTF_ID,
737 .arg2_type = ARG_PTR_TO_MEM,
738 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
739 .btf_id = bpf_seq_write_btf_ids,
742 static __always_inline int
743 get_map_perf_counter(struct bpf_map *map, u64 flags,
744 u64 *value, u64 *enabled, u64 *running)
746 struct bpf_array *array = container_of(map, struct bpf_array, map);
747 unsigned int cpu = smp_processor_id();
748 u64 index = flags & BPF_F_INDEX_MASK;
749 struct bpf_event_entry *ee;
751 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
753 if (index == BPF_F_CURRENT_CPU)
755 if (unlikely(index >= array->map.max_entries))
758 ee = READ_ONCE(array->ptrs[index]);
762 return perf_event_read_local(ee->event, value, enabled, running);
765 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
770 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
772 * this API is ugly since we lose the [-22..-2] range of valid
773 * counter values to error codes, but that's uapi
780 static const struct bpf_func_proto bpf_perf_event_read_proto = {
781 .func = bpf_perf_event_read,
783 .ret_type = RET_INTEGER,
784 .arg1_type = ARG_CONST_MAP_PTR,
785 .arg2_type = ARG_ANYTHING,
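/*
 * Usage sketch (assumption: "counters" is a hypothetical
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY populated with hardware counters from
 * user space):
 *
 *	u64 cycles = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * BPF_F_CURRENT_CPU makes get_map_perf_counter() above index the array with
 * smp_processor_id(), so one counter per CPU is the usual layout.
 */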
788 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
789 struct bpf_perf_event_value *, buf, u32, size)
793 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
795 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
801 memset(buf, 0, size);
805 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
806 .func = bpf_perf_event_read_value,
808 .ret_type = RET_INTEGER,
809 .arg1_type = ARG_CONST_MAP_PTR,
810 .arg2_type = ARG_ANYTHING,
811 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
812 .arg4_type = ARG_CONST_SIZE,
815 static __always_inline u64
816 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
817 u64 flags, struct perf_sample_data *sd)
819 struct bpf_array *array = container_of(map, struct bpf_array, map);
820 unsigned int cpu = smp_processor_id();
821 u64 index = flags & BPF_F_INDEX_MASK;
822 struct bpf_event_entry *ee;
823 struct perf_event *event;
825 if (index == BPF_F_CURRENT_CPU)
827 if (unlikely(index >= array->map.max_entries))
830 ee = READ_ONCE(array->ptrs[index]);
835 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
836 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
839 if (unlikely(event->oncpu != cpu))
842 return perf_event_output(event, sd, regs);
846 * Support executing tracepoints in normal, irq, and nmi context, each of
847 * which can call bpf_perf_event_output()
849 struct bpf_trace_sample_data {
850 struct perf_sample_data sds[3];
853 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
854 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
855 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
856 u64, flags, void *, data, u64, size)
858 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
859 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
860 struct perf_raw_record raw = {
866 struct perf_sample_data *sd;
869 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
874 sd = &sds->sds[nest_level - 1];
876 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
881 perf_sample_data_init(sd, 0, 0);
884 err = __bpf_perf_event_output(regs, map, flags, sd);
887 this_cpu_dec(bpf_trace_nest_level);
891 static const struct bpf_func_proto bpf_perf_event_output_proto = {
892 .func = bpf_perf_event_output,
894 .ret_type = RET_INTEGER,
895 .arg1_type = ARG_PTR_TO_CTX,
896 .arg2_type = ARG_CONST_MAP_PTR,
897 .arg3_type = ARG_ANYTHING,
898 .arg4_type = ARG_PTR_TO_MEM,
899 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
902 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
903 struct bpf_nested_pt_regs {
904 struct pt_regs regs[3];
906 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
907 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
909 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
910 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
912 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
913 struct perf_raw_frag frag = {
918 struct perf_raw_record raw = {
921 .next = ctx_size ? &frag : NULL,
927 struct perf_sample_data *sd;
928 struct pt_regs *regs;
931 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
935 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
936 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
938 perf_fetch_caller_regs(regs);
939 perf_sample_data_init(sd, 0, 0);
942 ret = __bpf_perf_event_output(regs, map, flags, sd);
944 this_cpu_dec(bpf_event_output_nest_level);
948 BPF_CALL_0(bpf_get_current_task)
950 return (long) current;
953 const struct bpf_func_proto bpf_get_current_task_proto = {
954 .func = bpf_get_current_task,
956 .ret_type = RET_INTEGER,
959 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
961 struct bpf_array *array = container_of(map, struct bpf_array, map);
964 if (unlikely(idx >= array->map.max_entries))
967 cgrp = READ_ONCE(array->ptrs[idx]);
971 return task_under_cgroup_hierarchy(current, cgrp);
974 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
975 .func = bpf_current_task_under_cgroup,
977 .ret_type = RET_INTEGER,
978 .arg1_type = ARG_CONST_MAP_PTR,
979 .arg2_type = ARG_ANYTHING,
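/*
 * Usage sketch (assumption: "cgroup_map" is a hypothetical
 * BPF_MAP_TYPE_CGROUP_ARRAY whose slot 0 was populated with a cgroup fd from
 * user space):
 *
 *	if (bpf_current_task_under_cgroup(&cgroup_map, 0))
 *		return 0;	(current task is within that cgroup hierarchy)
 *
 * The return value is whatever task_under_cgroup_hierarchy() reports above,
 * or a negative error for a bad index.
 */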
982 struct send_signal_irq_work {
983 struct irq_work irq_work;
984 struct task_struct *task;
989 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
991 static void do_bpf_send_signal(struct irq_work *entry)
993 struct send_signal_irq_work *work;
995 work = container_of(entry, struct send_signal_irq_work, irq_work);
996 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
999 static int bpf_send_signal_common(u32 sig, enum pid_type type)
1001 struct send_signal_irq_work *work = NULL;
1003 /* Similar to bpf_probe_write_user, the task needs to be
1004 * in a sound condition and kernel memory access must be
1005 * permitted in order to send a signal to the current task.
1008 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
1010 if (unlikely(uaccess_kernel()))
1012 if (unlikely(!nmi_uaccess_okay()))
1015 if (irqs_disabled()) {
1016 /* Do an early check on signal validity. Otherwise,
1017 * the error is lost in deferred irq_work.
1019 if (unlikely(!valid_signal(sig)))
1022 work = this_cpu_ptr(&send_signal_work);
1023 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
1026 /* Add the current task, which is the target of the signal,
1027 * to the irq_work. The current task may have changed by the time
1028 * the queued irq_work gets executed.
1030 work->task = current;
1033 irq_work_queue(&work->irq_work);
1037 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
1040 BPF_CALL_1(bpf_send_signal, u32, sig)
1042 return bpf_send_signal_common(sig, PIDTYPE_TGID);
1045 static const struct bpf_func_proto bpf_send_signal_proto = {
1046 .func = bpf_send_signal,
1048 .ret_type = RET_INTEGER,
1049 .arg1_type = ARG_ANYTHING,
1052 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
1054 return bpf_send_signal_common(sig, PIDTYPE_PID);
1057 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
1058 .func = bpf_send_signal_thread,
1060 .ret_type = RET_INTEGER,
1061 .arg1_type = ARG_ANYTHING,
1064 const struct bpf_func_proto *
1065 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1068 case BPF_FUNC_map_lookup_elem:
1069 return &bpf_map_lookup_elem_proto;
1070 case BPF_FUNC_map_update_elem:
1071 return &bpf_map_update_elem_proto;
1072 case BPF_FUNC_map_delete_elem:
1073 return &bpf_map_delete_elem_proto;
1074 case BPF_FUNC_map_push_elem:
1075 return &bpf_map_push_elem_proto;
1076 case BPF_FUNC_map_pop_elem:
1077 return &bpf_map_pop_elem_proto;
1078 case BPF_FUNC_map_peek_elem:
1079 return &bpf_map_peek_elem_proto;
1080 case BPF_FUNC_ktime_get_ns:
1081 return &bpf_ktime_get_ns_proto;
1082 case BPF_FUNC_ktime_get_boot_ns:
1083 return &bpf_ktime_get_boot_ns_proto;
1084 case BPF_FUNC_tail_call:
1085 return &bpf_tail_call_proto;
1086 case BPF_FUNC_get_current_pid_tgid:
1087 return &bpf_get_current_pid_tgid_proto;
1088 case BPF_FUNC_get_current_task:
1089 return &bpf_get_current_task_proto;
1090 case BPF_FUNC_get_current_uid_gid:
1091 return &bpf_get_current_uid_gid_proto;
1092 case BPF_FUNC_get_current_comm:
1093 return &bpf_get_current_comm_proto;
1094 case BPF_FUNC_trace_printk:
1095 return bpf_get_trace_printk_proto();
1096 case BPF_FUNC_get_smp_processor_id:
1097 return &bpf_get_smp_processor_id_proto;
1098 case BPF_FUNC_get_numa_node_id:
1099 return &bpf_get_numa_node_id_proto;
1100 case BPF_FUNC_perf_event_read:
1101 return &bpf_perf_event_read_proto;
1102 case BPF_FUNC_probe_write_user:
1103 return bpf_get_probe_write_proto();
1104 case BPF_FUNC_current_task_under_cgroup:
1105 return &bpf_current_task_under_cgroup_proto;
1106 case BPF_FUNC_get_prandom_u32:
1107 return &bpf_get_prandom_u32_proto;
1108 case BPF_FUNC_probe_read_user:
1109 return &bpf_probe_read_user_proto;
1110 case BPF_FUNC_probe_read_kernel:
1111 return &bpf_probe_read_kernel_proto;
1112 case BPF_FUNC_probe_read_user_str:
1113 return &bpf_probe_read_user_str_proto;
1114 case BPF_FUNC_probe_read_kernel_str:
1115 return &bpf_probe_read_kernel_str_proto;
1116 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1117 case BPF_FUNC_probe_read:
1118 return &bpf_probe_read_compat_proto;
1119 case BPF_FUNC_probe_read_str:
1120 return &bpf_probe_read_compat_str_proto;
1122 #ifdef CONFIG_CGROUPS
1123 case BPF_FUNC_get_current_cgroup_id:
1124 return &bpf_get_current_cgroup_id_proto;
1126 case BPF_FUNC_send_signal:
1127 return &bpf_send_signal_proto;
1128 case BPF_FUNC_send_signal_thread:
1129 return &bpf_send_signal_thread_proto;
1130 case BPF_FUNC_perf_event_read_value:
1131 return &bpf_perf_event_read_value_proto;
1132 case BPF_FUNC_get_ns_current_pid_tgid:
1133 return &bpf_get_ns_current_pid_tgid_proto;
1134 case BPF_FUNC_ringbuf_output:
1135 return &bpf_ringbuf_output_proto;
1136 case BPF_FUNC_ringbuf_reserve:
1137 return &bpf_ringbuf_reserve_proto;
1138 case BPF_FUNC_ringbuf_submit:
1139 return &bpf_ringbuf_submit_proto;
1140 case BPF_FUNC_ringbuf_discard:
1141 return &bpf_ringbuf_discard_proto;
1142 case BPF_FUNC_ringbuf_query:
1143 return &bpf_ringbuf_query_proto;
1144 case BPF_FUNC_jiffies64:
1145 return &bpf_jiffies64_proto;
1146 case BPF_FUNC_get_task_stack:
1147 return &bpf_get_task_stack_proto;
1153 static const struct bpf_func_proto *
1154 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1157 case BPF_FUNC_perf_event_output:
1158 return &bpf_perf_event_output_proto;
1159 case BPF_FUNC_get_stackid:
1160 return &bpf_get_stackid_proto;
1161 case BPF_FUNC_get_stack:
1162 return &bpf_get_stack_proto;
1163 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1164 case BPF_FUNC_override_return:
1165 return &bpf_override_return_proto;
1168 return bpf_tracing_func_proto(func_id, prog);
1172 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1173 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1174 const struct bpf_prog *prog,
1175 struct bpf_insn_access_aux *info)
1177 if (off < 0 || off >= sizeof(struct pt_regs))
1179 if (type != BPF_READ)
1181 if (off % size != 0)
1184 * Assertion for 32 bit to make sure last 8 byte access
1185 * (BPF_DW) to the last 4 byte member is disallowed.
1187 if (off + size > sizeof(struct pt_regs))
1193 const struct bpf_verifier_ops kprobe_verifier_ops = {
1194 .get_func_proto = kprobe_prog_func_proto,
1195 .is_valid_access = kprobe_prog_is_valid_access,
1198 const struct bpf_prog_ops kprobe_prog_ops = {
1201 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1202 u64, flags, void *, data, u64, size)
1204 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1207 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1208 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1209 * from there and call the same bpf_perf_event_output() helper inline.
1211 return ____bpf_perf_event_output(regs, map, flags, data, size);
1214 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1215 .func = bpf_perf_event_output_tp,
1217 .ret_type = RET_INTEGER,
1218 .arg1_type = ARG_PTR_TO_CTX,
1219 .arg2_type = ARG_CONST_MAP_PTR,
1220 .arg3_type = ARG_ANYTHING,
1221 .arg4_type = ARG_PTR_TO_MEM,
1222 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1225 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1228 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1231 * Same comment as in bpf_perf_event_output_tp(), only that this time
1232 * the other helper's function body cannot be inlined due to being
1233 * external, thus we need to call the raw helper function directly.
1235 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1239 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1240 .func = bpf_get_stackid_tp,
1242 .ret_type = RET_INTEGER,
1243 .arg1_type = ARG_PTR_TO_CTX,
1244 .arg2_type = ARG_CONST_MAP_PTR,
1245 .arg3_type = ARG_ANYTHING,
1248 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1251 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1253 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1254 (unsigned long) size, flags, 0);
1257 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1258 .func = bpf_get_stack_tp,
1260 .ret_type = RET_INTEGER,
1261 .arg1_type = ARG_PTR_TO_CTX,
1262 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1263 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1264 .arg4_type = ARG_ANYTHING,
1267 static const struct bpf_func_proto *
1268 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1271 case BPF_FUNC_perf_event_output:
1272 return &bpf_perf_event_output_proto_tp;
1273 case BPF_FUNC_get_stackid:
1274 return &bpf_get_stackid_proto_tp;
1275 case BPF_FUNC_get_stack:
1276 return &bpf_get_stack_proto_tp;
1278 return bpf_tracing_func_proto(func_id, prog);
1282 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1283 const struct bpf_prog *prog,
1284 struct bpf_insn_access_aux *info)
1286 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1288 if (type != BPF_READ)
1290 if (off % size != 0)
1293 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1297 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1298 .get_func_proto = tp_prog_func_proto,
1299 .is_valid_access = tp_prog_is_valid_access,
1302 const struct bpf_prog_ops tracepoint_prog_ops = {
1305 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1306 struct bpf_perf_event_value *, buf, u32, size)
1310 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1312 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1318 memset(buf, 0, size);
1322 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1323 .func = bpf_perf_prog_read_value,
1325 .ret_type = RET_INTEGER,
1326 .arg1_type = ARG_PTR_TO_CTX,
1327 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1328 .arg3_type = ARG_CONST_SIZE,
1331 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1332 void *, buf, u32, size, u64, flags)
1337 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1338 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1341 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1344 if (unlikely(!br_stack))
1347 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1348 return br_stack->nr * br_entry_size;
1350 if (!buf || (size % br_entry_size != 0))
1353 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1354 memcpy(buf, br_stack->entries, to_copy);
1360 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1361 .func = bpf_read_branch_records,
1363 .ret_type = RET_INTEGER,
1364 .arg1_type = ARG_PTR_TO_CTX,
1365 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1366 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1367 .arg4_type = ARG_ANYTHING,
1370 static const struct bpf_func_proto *
1371 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1374 case BPF_FUNC_perf_event_output:
1375 return &bpf_perf_event_output_proto_tp;
1376 case BPF_FUNC_get_stackid:
1377 return &bpf_get_stackid_proto_tp;
1378 case BPF_FUNC_get_stack:
1379 return &bpf_get_stack_proto_tp;
1380 case BPF_FUNC_perf_prog_read_value:
1381 return &bpf_perf_prog_read_value_proto;
1382 case BPF_FUNC_read_branch_records:
1383 return &bpf_read_branch_records_proto;
1385 return bpf_tracing_func_proto(func_id, prog);
1390 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1391 * to avoid potential recursive reuse issue when/if tracepoints are added
1392 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1394 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1395 * in normal, irq, and nmi context.
1397 struct bpf_raw_tp_regs {
1398 struct pt_regs regs[3];
1400 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1401 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1402 static struct pt_regs *get_bpf_raw_tp_regs(void)
1404 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1405 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1407 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1408 this_cpu_dec(bpf_raw_tp_nest_level);
1409 return ERR_PTR(-EBUSY);
1412 return &tp_regs->regs[nest_level - 1];
1415 static void put_bpf_raw_tp_regs(void)
1417 this_cpu_dec(bpf_raw_tp_nest_level);
1420 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1421 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1423 struct pt_regs *regs = get_bpf_raw_tp_regs();
1427 return PTR_ERR(regs);
1429 perf_fetch_caller_regs(regs);
1430 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1432 put_bpf_raw_tp_regs();
1436 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1437 .func = bpf_perf_event_output_raw_tp,
1439 .ret_type = RET_INTEGER,
1440 .arg1_type = ARG_PTR_TO_CTX,
1441 .arg2_type = ARG_CONST_MAP_PTR,
1442 .arg3_type = ARG_ANYTHING,
1443 .arg4_type = ARG_PTR_TO_MEM,
1444 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1447 extern const struct bpf_func_proto bpf_skb_output_proto;
1448 extern const struct bpf_func_proto bpf_xdp_output_proto;
1450 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1451 struct bpf_map *, map, u64, flags)
1453 struct pt_regs *regs = get_bpf_raw_tp_regs();
1457 return PTR_ERR(regs);
1459 perf_fetch_caller_regs(regs);
1460 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1461 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1463 put_bpf_raw_tp_regs();
1467 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1468 .func = bpf_get_stackid_raw_tp,
1470 .ret_type = RET_INTEGER,
1471 .arg1_type = ARG_PTR_TO_CTX,
1472 .arg2_type = ARG_CONST_MAP_PTR,
1473 .arg3_type = ARG_ANYTHING,
1476 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1477 void *, buf, u32, size, u64, flags)
1479 struct pt_regs *regs = get_bpf_raw_tp_regs();
1483 return PTR_ERR(regs);
1485 perf_fetch_caller_regs(regs);
1486 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1487 (unsigned long) size, flags, 0);
1488 put_bpf_raw_tp_regs();
1492 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1493 .func = bpf_get_stack_raw_tp,
1495 .ret_type = RET_INTEGER,
1496 .arg1_type = ARG_PTR_TO_CTX,
1497 .arg2_type = ARG_PTR_TO_MEM,
1498 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1499 .arg4_type = ARG_ANYTHING,
1502 static const struct bpf_func_proto *
1503 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1506 case BPF_FUNC_perf_event_output:
1507 return &bpf_perf_event_output_proto_raw_tp;
1508 case BPF_FUNC_get_stackid:
1509 return &bpf_get_stackid_proto_raw_tp;
1510 case BPF_FUNC_get_stack:
1511 return &bpf_get_stack_proto_raw_tp;
1513 return bpf_tracing_func_proto(func_id, prog);
1517 const struct bpf_func_proto *
1518 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1522 case BPF_FUNC_skb_output:
1523 return &bpf_skb_output_proto;
1524 case BPF_FUNC_xdp_output:
1525 return &bpf_xdp_output_proto;
1526 case BPF_FUNC_skc_to_tcp6_sock:
1527 return &bpf_skc_to_tcp6_sock_proto;
1528 case BPF_FUNC_skc_to_tcp_sock:
1529 return &bpf_skc_to_tcp_sock_proto;
1530 case BPF_FUNC_skc_to_tcp_timewait_sock:
1531 return &bpf_skc_to_tcp_timewait_sock_proto;
1532 case BPF_FUNC_skc_to_tcp_request_sock:
1533 return &bpf_skc_to_tcp_request_sock_proto;
1534 case BPF_FUNC_skc_to_udp6_sock:
1535 return &bpf_skc_to_udp6_sock_proto;
1537 case BPF_FUNC_seq_printf:
1538 return prog->expected_attach_type == BPF_TRACE_ITER ?
1539 &bpf_seq_printf_proto :
1541 case BPF_FUNC_seq_write:
1542 return prog->expected_attach_type == BPF_TRACE_ITER ?
1543 &bpf_seq_write_proto :
1546 return raw_tp_prog_func_proto(func_id, prog);
1550 static bool raw_tp_prog_is_valid_access(int off, int size,
1551 enum bpf_access_type type,
1552 const struct bpf_prog *prog,
1553 struct bpf_insn_access_aux *info)
1555 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1557 if (type != BPF_READ)
1559 if (off % size != 0)
1564 static bool tracing_prog_is_valid_access(int off, int size,
1565 enum bpf_access_type type,
1566 const struct bpf_prog *prog,
1567 struct bpf_insn_access_aux *info)
1569 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1571 if (type != BPF_READ)
1573 if (off % size != 0)
1575 return btf_ctx_access(off, size, type, prog, info);
1578 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1579 const union bpf_attr *kattr,
1580 union bpf_attr __user *uattr)
1585 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1586 .get_func_proto = raw_tp_prog_func_proto,
1587 .is_valid_access = raw_tp_prog_is_valid_access,
1590 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1593 const struct bpf_verifier_ops tracing_verifier_ops = {
1594 .get_func_proto = tracing_prog_func_proto,
1595 .is_valid_access = tracing_prog_is_valid_access,
1598 const struct bpf_prog_ops tracing_prog_ops = {
1599 .test_run = bpf_prog_test_run_tracing,
1602 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1603 enum bpf_access_type type,
1604 const struct bpf_prog *prog,
1605 struct bpf_insn_access_aux *info)
1608 if (size != sizeof(u64) || type != BPF_READ)
1610 info->reg_type = PTR_TO_TP_BUFFER;
1612 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1615 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1616 .get_func_proto = raw_tp_prog_func_proto,
1617 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1620 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1623 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1624 const struct bpf_prog *prog,
1625 struct bpf_insn_access_aux *info)
1627 const int size_u64 = sizeof(u64);
1629 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1631 if (type != BPF_READ)
1633 if (off % size != 0) {
1634 if (sizeof(unsigned long) != 4)
1638 if (off % size != 4)
1643 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1644 bpf_ctx_record_field_size(info, size_u64);
1645 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1648 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1649 bpf_ctx_record_field_size(info, size_u64);
1650 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1654 if (size != sizeof(long))
1661 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1662 const struct bpf_insn *si,
1663 struct bpf_insn *insn_buf,
1664 struct bpf_prog *prog, u32 *target_size)
1666 struct bpf_insn *insn = insn_buf;
1669 case offsetof(struct bpf_perf_event_data, sample_period):
1670 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1671 data), si->dst_reg, si->src_reg,
1672 offsetof(struct bpf_perf_event_data_kern, data));
1673 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1674 bpf_target_off(struct perf_sample_data, period, 8,
1677 case offsetof(struct bpf_perf_event_data, addr):
1678 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1679 data), si->dst_reg, si->src_reg,
1680 offsetof(struct bpf_perf_event_data_kern, data));
1681 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1682 bpf_target_off(struct perf_sample_data, addr, 8,
1686 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1687 regs), si->dst_reg, si->src_reg,
1688 offsetof(struct bpf_perf_event_data_kern, regs));
1689 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1694 return insn - insn_buf;
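/*
 * Descriptive note (added): the conversions above rewrite field reads on
 * struct bpf_perf_event_data into two loads against the kernel-side context:
 * first the data (or regs) pointer is loaded from
 * struct bpf_perf_event_data_kern, then the requested member (period, addr,
 * or a pt_regs word) is loaded through that pointer, so the BPF program never
 * touches the kernel structure layout directly.
 */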
1697 const struct bpf_verifier_ops perf_event_verifier_ops = {
1698 .get_func_proto = pe_prog_func_proto,
1699 .is_valid_access = pe_prog_is_valid_access,
1700 .convert_ctx_access = pe_prog_convert_ctx_access,
1703 const struct bpf_prog_ops perf_event_prog_ops = {
1706 static DEFINE_MUTEX(bpf_event_mutex);
1708 #define BPF_TRACE_MAX_PROGS 64
1710 int perf_event_attach_bpf_prog(struct perf_event *event,
1711 struct bpf_prog *prog)
1713 struct bpf_prog_array *old_array;
1714 struct bpf_prog_array *new_array;
1718 * Kprobe override only works if the kprobe is placed on the function entry,
1719 * and only if the target function is on the error-injection opt-in list.
1721 if (prog->kprobe_override &&
1722 (!trace_kprobe_on_func_entry(event->tp_event) ||
1723 !trace_kprobe_error_injectable(event->tp_event)))
1726 mutex_lock(&bpf_event_mutex);
1731 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1733 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1738 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1742 /* set the new array to event->tp_event and set event->prog */
1744 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1745 bpf_prog_array_free(old_array);
1748 mutex_unlock(&bpf_event_mutex);
1752 void perf_event_detach_bpf_prog(struct perf_event *event)
1754 struct bpf_prog_array *old_array;
1755 struct bpf_prog_array *new_array;
1758 mutex_lock(&bpf_event_mutex);
1763 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1764 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1768 bpf_prog_array_delete_safe(old_array, event->prog);
1770 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1771 bpf_prog_array_free(old_array);
1774 bpf_prog_put(event->prog);
1778 mutex_unlock(&bpf_event_mutex);
1781 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1783 struct perf_event_query_bpf __user *uquery = info;
1784 struct perf_event_query_bpf query = {};
1785 struct bpf_prog_array *progs;
1786 u32 *ids, prog_cnt, ids_len;
1789 if (!perfmon_capable())
1791 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1793 if (copy_from_user(&query, uquery, sizeof(query)))
1796 ids_len = query.ids_len;
1797 if (ids_len > BPF_TRACE_MAX_PROGS)
1799 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1803 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1804 * is required when the user only wants to check for uquery->prog_cnt.
1805 * There is no need to check for it since the case is handled
1806 * gracefully in bpf_prog_array_copy_info.
1809 mutex_lock(&bpf_event_mutex);
1810 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1811 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1812 mutex_unlock(&bpf_event_mutex);
1814 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1815 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1822 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1823 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1825 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1827 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1829 for (; btp < __stop__bpf_raw_tp; btp++) {
1830 if (!strcmp(btp->tp->name, name))
1834 return bpf_get_raw_tracepoint_module(name);
1837 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1839 struct module *mod = __module_address((unsigned long)btp);
1845 static __always_inline
1846 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1850 (void) BPF_PROG_RUN(prog, args);
1854 #define UNPACK(...) __VA_ARGS__
1855 #define REPEAT_1(FN, DL, X, ...) FN(X)
1856 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1857 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1858 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1859 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1860 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1861 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1862 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1863 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1864 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1865 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1866 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1867 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
1869 #define SARG(X) u64 arg##X
1870 #define COPY(X) args[X] = arg##X
1872 #define __DL_COM (,)
1873 #define __DL_SEM (;)
1875 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1877 #define BPF_TRACE_DEFN_x(x) \
1878 void bpf_trace_run##x(struct bpf_prog *prog, \
1879 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
1882 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
1883 __bpf_trace_run(prog, args); \
1885 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
1886 BPF_TRACE_DEFN_x(1);
1887 BPF_TRACE_DEFN_x(2);
1888 BPF_TRACE_DEFN_x(3);
1889 BPF_TRACE_DEFN_x(4);
1890 BPF_TRACE_DEFN_x(5);
1891 BPF_TRACE_DEFN_x(6);
1892 BPF_TRACE_DEFN_x(7);
1893 BPF_TRACE_DEFN_x(8);
1894 BPF_TRACE_DEFN_x(9);
1895 BPF_TRACE_DEFN_x(10);
1896 BPF_TRACE_DEFN_x(11);
1897 BPF_TRACE_DEFN_x(12);
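/*
 * Illustrative expansion (added sketch): BPF_TRACE_DEFN_x(2) generates
 * roughly the following, with the REPEAT() machinery filling in the
 * parameter list and the args[] copies:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * These are the run-time entry points that the __bpf_trace_##call thunks
 * generated via include/trace/bpf_probe.h call once a raw tracepoint
 * program is attached.
 */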
1899 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1901 struct tracepoint *tp = btp->tp;
1904 * check that program doesn't access arguments beyond what's
1905 * available in this tracepoint
1907 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1910 if (prog->aux->max_tp_access > btp->writable_size)
1913 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1916 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1918 return __bpf_probe_register(btp, prog);
1921 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1923 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1926 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1927 u32 *fd_type, const char **buf,
1928 u64 *probe_offset, u64 *probe_addr)
1930 bool is_tracepoint, is_syscall_tp;
1931 struct bpf_prog *prog;
1938 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1939 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1942 *prog_id = prog->aux->id;
1943 flags = event->tp_event->flags;
1944 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1945 is_syscall_tp = is_syscall_trace_event(event->tp_event);
1947 if (is_tracepoint || is_syscall_tp) {
1948 *buf = is_tracepoint ? event->tp_event->tp->name
1949 : event->tp_event->name;
1950 *fd_type = BPF_FD_TYPE_TRACEPOINT;
1951 *probe_offset = 0x0;
1956 #ifdef CONFIG_KPROBE_EVENTS
1957 if (flags & TRACE_EVENT_FL_KPROBE)
1958 err = bpf_get_kprobe_info(event, fd_type, buf,
1959 probe_offset, probe_addr,
1960 event->attr.type == PERF_TYPE_TRACEPOINT);
1962 #ifdef CONFIG_UPROBE_EVENTS
1963 if (flags & TRACE_EVENT_FL_UPROBE)
1964 err = bpf_get_uprobe_info(event, fd_type, buf,
1966 event->attr.type == PERF_TYPE_TRACEPOINT);
1973 static int __init send_signal_irq_work_init(void)
1976 struct send_signal_irq_work *work;
1978 for_each_possible_cpu(cpu) {
1979 work = per_cpu_ptr(&send_signal_work, cpu);
1980 init_irq_work(&work->irq_work, do_bpf_send_signal);
1985 subsys_initcall(send_signal_irq_work_init);
1987 #ifdef CONFIG_MODULES
1988 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1991 struct bpf_trace_module *btm, *tmp;
1992 struct module *mod = module;
1994 if (mod->num_bpf_raw_events == 0 ||
1995 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1998 mutex_lock(&bpf_module_mutex);
2001 case MODULE_STATE_COMING:
2002 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2004 btm->module = module;
2005 list_add(&btm->list, &bpf_trace_modules);
2008 case MODULE_STATE_GOING:
2009 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2010 if (btm->module == module) {
2011 list_del(&btm->list);
2019 mutex_unlock(&bpf_module_mutex);
2024 static struct notifier_block bpf_module_nb = {
2025 .notifier_call = bpf_event_notify,
2028 static int __init bpf_event_init(void)
2030 register_module_notifier(&bpf_module_nb);
2034 fs_initcall(bpf_event_init);
2035 #endif /* CONFIG_MODULES */