1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
20 #include <uapi/linux/bpf.h>
21 #include <uapi/linux/btf.h>
25 #include "trace_probe.h"
28 #define CREATE_TRACE_POINTS
29 #include "bpf_trace.h"
31 #define bpf_event_rcu_dereference(p) \
32 rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
35 struct bpf_trace_module {
36 struct module *module;
37 struct list_head list;
40 static LIST_HEAD(bpf_trace_modules);
41 static DEFINE_MUTEX(bpf_module_mutex);
43 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
45 struct bpf_raw_event_map *btp, *ret = NULL;
46 struct bpf_trace_module *btm;
49 mutex_lock(&bpf_module_mutex);
50 list_for_each_entry(btm, &bpf_trace_modules, list) {
51 for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
52 btp = &btm->module->bpf_raw_events[i];
53 if (!strcmp(btp->tp->name, name)) {
54 if (try_module_get(btm->module))
61 mutex_unlock(&bpf_module_mutex);
65 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
69 #endif /* CONFIG_MODULES */
71 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
72 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
74 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
75 u64 flags, const struct btf **btf,
79 * trace_call_bpf - invoke BPF program
80 * @call: tracepoint event
81 * @ctx: opaque context pointer
83 * kprobe handlers execute BPF programs via this helper.
84 * Can be used from static tracepoints in the future.
86 * Return: BPF programs always return an integer which is interpreted by the
87 * kprobe side as:
88 * 0 - return from kprobe (event is filtered out)
89 * 1 - store kprobe event into ring buffer
90 * Other values are reserved and currently alias to 1
92 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
96 if (in_nmi()) /* not supported yet */
101 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
103 * since some bpf program is already running on this cpu,
104 * don't call into another bpf program (same or different)
105 * and don't send kprobe event into ring-buffer,
106 * so return zero here
113 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114 * to all call sites, we did a bpf_prog_array_valid() there to check
115 * whether call->prog_array is empty or not, which is
116 * a heuristic to speed up execution.
118 * If the prog_array fetched by bpf_prog_array_valid() was
119 * non-NULL, we go into trace_call_bpf() and do the actual
120 * proper rcu_dereference() under RCU lock.
121 * If it turns out that prog_array is NULL, we bail out.
122 * Conversely, if the pointer fetched by bpf_prog_array_valid()
123 * was NULL, we skip the prog_array and risk missing
124 * events if it was updated in between that check and the
125 * rcu_dereference(), which is an accepted risk.
127 ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
130 __this_cpu_dec(bpf_prog_active);
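/*
 * In broad terms: the bpf_prog_active counter above is a per-CPU recursion
 * guard, and BPF_PROG_RUN_ARRAY_CHECK() runs every program attached to the
 * event under RCU. The individual return values are combined such that any
 * program returning 0 filters the event out, matching the 0/1 contract
 * documented above trace_call_bpf().
 */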
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
138 regs_set_return_value(regs, rc);
139 override_function_with_return(regs);
143 static const struct bpf_func_proto bpf_override_return_proto = {
144 .func = bpf_override_return,
146 .ret_type = RET_INTEGER,
147 .arg1_type = ARG_PTR_TO_CTX,
148 .arg2_type = ARG_ANYTHING,
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
157 ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158 if (unlikely(ret < 0))
159 memset(dst, 0, size);
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164 const void __user *, unsafe_ptr)
166 return bpf_probe_read_user_common(dst, size, unsafe_ptr);
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170 .func = bpf_probe_read_user,
172 .ret_type = RET_INTEGER,
173 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
174 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
175 .arg3_type = ARG_ANYTHING,
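/*
 * Illustrative use from a BPF program, as a sketch only ("user_ptr" is
 * assumed to be an untrusted user-space address obtained from the probe
 * context):
 *
 *	struct args { long x; } a = {};
 *	long err = bpf_probe_read_user(&a, sizeof(a), user_ptr);
 *
 * On failure the destination is zeroed (see the memset above), so callers
 * that ignore the return value never read stale data.
 */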
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180 const void __user *unsafe_ptr)
184 ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
185 if (unlikely(ret < 0))
186 memset(dst, 0, size);
190 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
191 const void __user *, unsafe_ptr)
193 return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
196 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
197 .func = bpf_probe_read_user_str,
199 .ret_type = RET_INTEGER,
200 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
201 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
202 .arg3_type = ARG_ANYTHING,
205 static __always_inline int
206 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
208 int ret = security_locked_down(LOCKDOWN_BPF_READ);
210 if (unlikely(ret < 0))
212 ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
213 if (unlikely(ret < 0))
217 memset(dst, 0, size);
221 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
222 const void *, unsafe_ptr)
224 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
227 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
228 .func = bpf_probe_read_kernel,
230 .ret_type = RET_INTEGER,
231 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
232 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
233 .arg3_type = ARG_ANYTHING,
236 static __always_inline int
237 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
239 int ret = security_locked_down(LOCKDOWN_BPF_READ);
241 if (unlikely(ret < 0))
245 * The strncpy_from_kernel_nofault() call will likely not fill the
246 * entire buffer, but that's okay in this circumstance as we're probing
247 * arbitrary memory anyway similar to bpf_probe_read_*() and might
248 * as well probe the stack. Thus, memory is explicitly cleared
249 * only in error case, so that improper users ignoring return
250 * code altogether don't copy garbage; otherwise length of string
251 * is returned that can be used for bpf_perf_event_output() et al.
253 ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
254 if (unlikely(ret < 0))
259 memset(dst, 0, size);
263 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
264 const void *, unsafe_ptr)
266 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
269 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
270 .func = bpf_probe_read_kernel_str,
272 .ret_type = RET_INTEGER,
273 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
274 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
275 .arg3_type = ARG_ANYTHING,
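/*
 * The compat helpers below back the legacy bpf_probe_read() and
 * bpf_probe_read_str() helper IDs: on architectures where kernel and user
 * addresses cannot overlap, they guess the address space from the pointer
 * value (below TASK_SIZE means user) and dispatch to the user or kernel
 * variants above.
 */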
278 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
279 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
280 const void *, unsafe_ptr)
282 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
283 return bpf_probe_read_user_common(dst, size,
284 (__force void __user *)unsafe_ptr);
286 return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
289 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
290 .func = bpf_probe_read_compat,
292 .ret_type = RET_INTEGER,
293 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
294 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
295 .arg3_type = ARG_ANYTHING,
298 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
299 const void *, unsafe_ptr)
301 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
302 return bpf_probe_read_user_str_common(dst, size,
303 (__force void __user *)unsafe_ptr);
305 return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
308 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
309 .func = bpf_probe_read_compat_str,
311 .ret_type = RET_INTEGER,
312 .arg1_type = ARG_PTR_TO_UNINIT_MEM,
313 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
314 .arg3_type = ARG_ANYTHING,
316 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
318 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
322 * Ensure we're in user context which is safe for the helper to
323 * run. This helper has no business in a kthread.
325 * access_ok() should prevent writing to non-user memory, but in
326 * some situations (nommu, temporary switch, etc) access_ok() does
327 * not provide enough validation, hence the check on KERNEL_DS.
329 * nmi_uaccess_okay() ensures the probe is not run in an interim
330 * state, when the task or mm are switched. This is specifically
331 * required to prevent the use of temporary mm.
334 if (unlikely(in_interrupt() ||
335 current->flags & (PF_KTHREAD | PF_EXITING)))
337 if (unlikely(uaccess_kernel()))
339 if (unlikely(!nmi_uaccess_okay()))
342 return copy_to_user_nofault(unsafe_ptr, src, size);
345 static const struct bpf_func_proto bpf_probe_write_user_proto = {
346 .func = bpf_probe_write_user,
348 .ret_type = RET_INTEGER,
349 .arg1_type = ARG_ANYTHING,
350 .arg2_type = ARG_PTR_TO_MEM,
351 .arg3_type = ARG_CONST_SIZE,
354 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
356 if (!capable(CAP_SYS_ADMIN))
359 pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
360 current->comm, task_pid_nr(current));
362 return &bpf_probe_write_user_proto;
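/*
 * In short: bpf_probe_write_user() is only handed out when the loader has
 * CAP_SYS_ADMIN, and even then the ratelimited warning above is printed at
 * program load time, since the helper can silently corrupt user memory of
 * the traced task.
 */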
365 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
368 void __user *user_ptr = (__force void __user *)unsafe_ptr;
374 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
375 if ((unsigned long)unsafe_ptr < TASK_SIZE) {
376 strncpy_from_user_nofault(buf, user_ptr, bufsz);
382 strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
385 strncpy_from_user_nofault(buf, user_ptr, bufsz);
390 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
392 #define BPF_TRACE_PRINTK_SIZE 1024
394 static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
396 static char buf[BPF_TRACE_PRINTK_SIZE];
401 raw_spin_lock_irqsave(&trace_printk_lock, flags);
403 ret = vsnprintf(buf, sizeof(buf), fmt, ap);
405 /* vsnprintf() will not append null for zero-length strings */
408 trace_bpf_trace_printk(buf);
409 raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
415 * Only limited trace_printk() conversion specifiers allowed:
416 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
418 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
419 u64, arg2, u64, arg3)
421 int i, mod[3] = {}, fmt_cnt = 0;
422 char buf[64], fmt_ptype;
423 void *unsafe_ptr = NULL;
424 bool str_seen = false;
427 * bpf_check()->check_func_arg()->check_stack_boundary()
428 * guarantees that fmt points to bpf program stack,
429 * fmt_size bytes of it were initialized and fmt_size > 0
431 if (fmt[--fmt_size] != 0)
434 /* check format string for allowed specifiers */
435 for (i = 0; i < fmt_size; i++) {
436 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
445 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
450 } else if (fmt[i] == 'p') {
452 if ((fmt[i + 1] == 'k' ||
453 fmt[i + 1] == 'u') &&
455 fmt_ptype = fmt[i + 1];
460 if (fmt[i + 1] == 'B') {
465 /* disallow any further format extensions */
466 if (fmt[i + 1] != 0 &&
467 !isspace(fmt[i + 1]) &&
468 !ispunct(fmt[i + 1]))
472 } else if (fmt[i] == 's') {
477 /* allow only one '%s' per fmt string */
481 if (fmt[i + 1] != 0 &&
482 !isspace(fmt[i + 1]) &&
483 !ispunct(fmt[i + 1]))
488 unsafe_ptr = (void *)(long)arg1;
492 unsafe_ptr = (void *)(long)arg2;
496 unsafe_ptr = (void *)(long)arg3;
501 bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
511 if (fmt[i] != 'i' && fmt[i] != 'd' &&
512 fmt[i] != 'u' && fmt[i] != 'x')
518 /* Horrid workaround for getting va_list handling working with different
519 * argument type combinations generically for 32 and 64 bit archs.
521 #define __BPF_TP_EMIT() __BPF_ARG3_TP()
522 #define __BPF_TP(...) \
523 bpf_do_trace_printk(fmt, ##__VA_ARGS__)
525 #define __BPF_ARG1_TP(...) \
526 ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
527 ? __BPF_TP(arg1, ##__VA_ARGS__) \
528 : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
529 ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
530 : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
532 #define __BPF_ARG2_TP(...) \
533 ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
534 ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
535 : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
536 ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
537 : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
539 #define __BPF_ARG3_TP(...) \
540 ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
541 ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
542 : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
543 ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
544 : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
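/*
 * mod[i] records the 'l' length modifiers parsed for the i-th conversion
 * (0: none, 1: 'l', 2: 'll'). The __BPF_ARG*_TP() macros above use it to
 * cast each raw u64 argument to u32, long or u64, so that vsnprintf() in
 * bpf_do_trace_printk() consumes the right argument size on both 32- and
 * 64-bit architectures.
 */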
546 return __BPF_TP_EMIT();
549 static const struct bpf_func_proto bpf_trace_printk_proto = {
550 .func = bpf_trace_printk,
552 .ret_type = RET_INTEGER,
553 .arg1_type = ARG_PTR_TO_MEM,
554 .arg2_type = ARG_CONST_SIZE,
557 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
560 * This program might be calling bpf_trace_printk,
561 * so enable the associated bpf_trace/bpf_trace_printk event.
562 * Repeat this each time as it is possible a user has
563 * disabled bpf_trace_printk events. By loading a program that
564 * calls bpf_trace_printk(), however, the user has expressed
565 * the intent to see such events.
567 if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
568 pr_warn_ratelimited("could not enable bpf_trace_printk events");
570 return &bpf_trace_printk_proto;
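/*
 * Illustrative use from a BPF program, as a sketch only:
 *
 *	char fmt[] = "hello pid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), bpf_get_current_pid_tgid() >> 32);
 *
 * The output travels through the bpf_trace/bpf_trace_printk event enabled
 * above and is typically read from the tracefs trace_pipe file.
 */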
573 #define MAX_SEQ_PRINTF_VARARGS 12
574 #define MAX_SEQ_PRINTF_MAX_MEMCPY 6
575 #define MAX_SEQ_PRINTF_STR_LEN 128
577 struct bpf_seq_printf_buf {
578 char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
580 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
581 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
583 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
584 const void *, data, u32, data_len)
586 int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
587 int i, buf_used, copy_size, num_args;
588 u64 params[MAX_SEQ_PRINTF_VARARGS];
589 struct bpf_seq_printf_buf *bufs;
590 const u64 *args = data;
592 buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
593 if (WARN_ON_ONCE(buf_used > 1)) {
598 bufs = this_cpu_ptr(&bpf_seq_printf_buf);
601 * bpf_check()->check_func_arg()->check_stack_boundary()
602 * guarantees that fmt points to bpf program stack,
603 * fmt_size bytes of it were initialized and fmt_size > 0
605 if (fmt[--fmt_size] != 0)
611 for (i = 0; i < fmt_size; i++) {
613 if (fmt[i + 1] == '%')
615 else if (!data || !data_len)
620 num_args = data_len / 8;
622 /* check format string for allowed specifiers */
623 for (i = 0; i < fmt_size; i++) {
624 /* only printable ascii for now. */
625 if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
633 if (fmt[i + 1] == '%') {
638 if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
643 if (fmt_cnt >= num_args) {
648 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
651 /* skip optional "[0 +-][num]" width formatting field */
652 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
655 if (fmt[i] >= '1' && fmt[i] <= '9') {
657 while (fmt[i] >= '0' && fmt[i] <= '9')
664 /* try our best to copy */
665 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
670 unsafe_ptr = (void *)(long)args[fmt_cnt];
671 err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
672 unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
674 bufs->buf[memcpy_cnt][0] = '\0';
675 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
683 if (fmt[i + 1] == 0 ||
687 /* just kernel pointers */
688 params[fmt_cnt] = args[fmt_cnt];
693 /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
694 if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
698 if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
703 if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
709 copy_size = (fmt[i + 2] == '4') ? 4 : 16;
711 err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
712 (void *) (long) args[fmt_cnt],
715 memset(bufs->buf[memcpy_cnt], 0, copy_size);
716 params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
730 if (fmt[i] != 'i' && fmt[i] != 'd' &&
731 fmt[i] != 'u' && fmt[i] != 'x' &&
737 params[fmt_cnt] = args[fmt_cnt];
741 /* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
742 * all of them to seq_printf().
744 seq_printf(m, fmt, params[0], params[1], params[2], params[3],
745 params[4], params[5], params[6], params[7], params[8],
746 params[9], params[10], params[11]);
748 err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
750 this_cpu_dec(bpf_seq_printf_buf_used);
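/*
 * bpf_seq_printf() is intended for BPF iterator programs (see the
 * BPF_TRACE_ITER check in tracing_prog_func_proto() below): "data" is an
 * array of u64 values, one per conversion specifier, so data_len must be a
 * multiple of 8 (num_args = data_len / 8 above). String and %pI4/%pI6-style
 * arguments are bounced through the per-CPU bpf_seq_printf_buf.
 */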
754 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
756 static const struct bpf_func_proto bpf_seq_printf_proto = {
757 .func = bpf_seq_printf,
759 .ret_type = RET_INTEGER,
760 .arg1_type = ARG_PTR_TO_BTF_ID,
761 .arg1_btf_id = &btf_seq_file_ids[0],
762 .arg2_type = ARG_PTR_TO_MEM,
763 .arg3_type = ARG_CONST_SIZE,
764 .arg4_type = ARG_PTR_TO_MEM_OR_NULL,
765 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
768 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
770 return seq_write(m, data, len) ? -EOVERFLOW : 0;
773 static const struct bpf_func_proto bpf_seq_write_proto = {
774 .func = bpf_seq_write,
776 .ret_type = RET_INTEGER,
777 .arg1_type = ARG_PTR_TO_BTF_ID,
778 .arg1_btf_id = &btf_seq_file_ids[0],
779 .arg2_type = ARG_PTR_TO_MEM,
780 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
783 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
784 u32, btf_ptr_size, u64, flags)
786 const struct btf *btf;
790 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
794 return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
797 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
798 .func = bpf_seq_printf_btf,
800 .ret_type = RET_INTEGER,
801 .arg1_type = ARG_PTR_TO_BTF_ID,
802 .arg1_btf_id = &btf_seq_file_ids[0],
803 .arg2_type = ARG_PTR_TO_MEM,
804 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
805 .arg4_type = ARG_ANYTHING,
808 static __always_inline int
809 get_map_perf_counter(struct bpf_map *map, u64 flags,
810 u64 *value, u64 *enabled, u64 *running)
812 struct bpf_array *array = container_of(map, struct bpf_array, map);
813 unsigned int cpu = smp_processor_id();
814 u64 index = flags & BPF_F_INDEX_MASK;
815 struct bpf_event_entry *ee;
817 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
819 if (index == BPF_F_CURRENT_CPU)
821 if (unlikely(index >= array->map.max_entries))
824 ee = READ_ONCE(array->ptrs[index]);
828 return perf_event_read_local(ee->event, value, enabled, running);
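/*
 * The low BPF_F_INDEX_MASK bits of "flags" select a slot in the perf event
 * array map; passing BPF_F_CURRENT_CPU picks the slot of the executing CPU,
 * which is the common way of keeping one counter per CPU.
 */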
831 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
836 err = get_map_perf_counter(map, flags, &value, NULL, NULL);
838 * this api is ugly since we miss [-22..-2] range of valid
839 * counter values, but that's uapi
846 static const struct bpf_func_proto bpf_perf_event_read_proto = {
847 .func = bpf_perf_event_read,
849 .ret_type = RET_INTEGER,
850 .arg1_type = ARG_CONST_MAP_PTR,
851 .arg2_type = ARG_ANYTHING,
854 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
855 struct bpf_perf_event_value *, buf, u32, size)
859 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
861 err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
867 memset(buf, 0, size);
871 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
872 .func = bpf_perf_event_read_value,
874 .ret_type = RET_INTEGER,
875 .arg1_type = ARG_CONST_MAP_PTR,
876 .arg2_type = ARG_ANYTHING,
877 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
878 .arg4_type = ARG_CONST_SIZE,
881 static __always_inline u64
882 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
883 u64 flags, struct perf_sample_data *sd)
885 struct bpf_array *array = container_of(map, struct bpf_array, map);
886 unsigned int cpu = smp_processor_id();
887 u64 index = flags & BPF_F_INDEX_MASK;
888 struct bpf_event_entry *ee;
889 struct perf_event *event;
891 if (index == BPF_F_CURRENT_CPU)
893 if (unlikely(index >= array->map.max_entries))
896 ee = READ_ONCE(array->ptrs[index]);
901 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
902 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
905 if (unlikely(event->oncpu != cpu))
908 return perf_event_output(event, sd, regs);
912 * Support executing tracepoints in normal, irq, and nmi context, each of
913 * which may call bpf_perf_event_output()
915 struct bpf_trace_sample_data {
916 struct perf_sample_data sds[3];
919 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
920 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
921 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
922 u64, flags, void *, data, u64, size)
924 struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
925 int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
926 struct perf_raw_record raw = {
932 struct perf_sample_data *sd;
935 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
940 sd = &sds->sds[nest_level - 1];
942 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
947 perf_sample_data_init(sd, 0, 0);
950 err = __bpf_perf_event_output(regs, map, flags, sd);
953 this_cpu_dec(bpf_trace_nest_level);
957 static const struct bpf_func_proto bpf_perf_event_output_proto = {
958 .func = bpf_perf_event_output,
960 .ret_type = RET_INTEGER,
961 .arg1_type = ARG_PTR_TO_CTX,
962 .arg2_type = ARG_CONST_MAP_PTR,
963 .arg3_type = ARG_ANYTHING,
964 .arg4_type = ARG_PTR_TO_MEM,
965 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
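/*
 * Illustrative use from a kprobe program, as a sketch only ("events" is
 * assumed to be a BPF_MAP_TYPE_PERF_EVENT_ARRAY map and "e" a sample struct
 * filled in by the program):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 *
 * The nesting counter above lets the helper run in normal, irq and nmi
 * context without one nesting level clobbering another's sample data.
 */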
968 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
969 struct bpf_nested_pt_regs {
970 struct pt_regs regs[3];
972 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
973 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
975 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
976 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
978 int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
979 struct perf_raw_frag frag = {
984 struct perf_raw_record raw = {
987 .next = ctx_size ? &frag : NULL,
993 struct perf_sample_data *sd;
994 struct pt_regs *regs;
997 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
1001 sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
1002 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
1004 perf_fetch_caller_regs(regs);
1005 perf_sample_data_init(sd, 0, 0);
1008 ret = __bpf_perf_event_output(regs, map, flags, sd);
1010 this_cpu_dec(bpf_event_output_nest_level);
1014 BPF_CALL_0(bpf_get_current_task)
1016 return (long) current;
1019 const struct bpf_func_proto bpf_get_current_task_proto = {
1020 .func = bpf_get_current_task,
1022 .ret_type = RET_INTEGER,
1025 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
1027 struct bpf_array *array = container_of(map, struct bpf_array, map);
1028 struct cgroup *cgrp;
1030 if (unlikely(idx >= array->map.max_entries))
1033 cgrp = READ_ONCE(array->ptrs[idx]);
1034 if (unlikely(!cgrp))
1037 return task_under_cgroup_hierarchy(current, cgrp);
1040 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
1041 .func = bpf_current_task_under_cgroup,
1043 .ret_type = RET_INTEGER,
1044 .arg1_type = ARG_CONST_MAP_PTR,
1045 .arg2_type = ARG_ANYTHING,
1048 struct send_signal_irq_work {
1049 struct irq_work irq_work;
1050 struct task_struct *task;
1055 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
1057 static void do_bpf_send_signal(struct irq_work *entry)
1059 struct send_signal_irq_work *work;
1061 work = container_of(entry, struct send_signal_irq_work, irq_work);
1062 group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
1065 static int bpf_send_signal_common(u32 sig, enum pid_type type)
1067 struct send_signal_irq_work *work = NULL;
1069 /* Similar to bpf_probe_write_user, the task needs to be
1070 * in a sound condition and kernel memory access must be
1071 * permitted in order to send a signal to the current task.
1072 */
1074 if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
1076 if (unlikely(uaccess_kernel()))
1078 if (unlikely(!nmi_uaccess_okay()))
1081 if (irqs_disabled()) {
1082 /* Do an early check on signal validity. Otherwise,
1083 * the error is lost in deferred irq_work.
1085 if (unlikely(!valid_signal(sig)))
1088 work = this_cpu_ptr(&send_signal_work);
1089 if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
1092 /* Add the current task, which is the target of the signal,
1093 * to the irq_work. The current task may have changed by the time
1094 * the queued irq_work is executed.
1096 work->task = current;
1099 irq_work_queue(&work->irq_work);
1103 return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
1106 BPF_CALL_1(bpf_send_signal, u32, sig)
1108 return bpf_send_signal_common(sig, PIDTYPE_TGID);
1111 static const struct bpf_func_proto bpf_send_signal_proto = {
1112 .func = bpf_send_signal,
1114 .ret_type = RET_INTEGER,
1115 .arg1_type = ARG_ANYTHING,
1118 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
1120 return bpf_send_signal_common(sig, PIDTYPE_PID);
1123 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
1124 .func = bpf_send_signal_thread,
1126 .ret_type = RET_INTEGER,
1127 .arg1_type = ARG_ANYTHING,
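/*
 * bpf_send_signal() above targets the whole thread group of the current task
 * (PIDTYPE_TGID), while bpf_send_signal_thread() targets only the current
 * thread (PIDTYPE_PID); both defer to an irq_work in bpf_send_signal_common()
 * when interrupts are disabled.
 */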
1130 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
1138 p = d_path(path, buf, sz);
1143 memmove(buf, p, len);
1149 BTF_SET_START(btf_allowlist_d_path)
1150 #ifdef CONFIG_SECURITY
1151 BTF_ID(func, security_file_permission)
1152 BTF_ID(func, security_inode_getattr)
1153 BTF_ID(func, security_file_open)
1155 #ifdef CONFIG_SECURITY_PATH
1156 BTF_ID(func, security_path_truncate)
1158 BTF_ID(func, vfs_truncate)
1159 BTF_ID(func, vfs_fallocate)
1160 BTF_ID(func, dentry_open)
1161 BTF_ID(func, vfs_getattr)
1162 BTF_ID(func, filp_close)
1163 BTF_SET_END(btf_allowlist_d_path)
1165 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
1167 return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
1170 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
1172 static const struct bpf_func_proto bpf_d_path_proto = {
1175 .ret_type = RET_INTEGER,
1176 .arg1_type = ARG_PTR_TO_BTF_ID,
1177 .arg1_btf_id = &bpf_d_path_btf_ids[0],
1178 .arg2_type = ARG_PTR_TO_MEM,
1179 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1180 .allowed = bpf_d_path_allowed,
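/*
 * bpf_d_path() is deliberately restricted: the ->allowed callback above only
 * admits programs attached (by BTF ID) to the functions in
 * btf_allowlist_d_path, i.e. a handful of security_* hooks and vfs helpers
 * that can be expected to hold a valid struct path.
 */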
1183 #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
1184 BTF_F_PTR_RAW | BTF_F_ZERO)
1186 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
1187 u64 flags, const struct btf **btf,
1190 const struct btf_type *t;
1192 if (unlikely(flags & ~(BTF_F_ALL)))
1195 if (btf_ptr_size != sizeof(struct btf_ptr))
1198 *btf = bpf_get_btf_vmlinux();
1200 if (IS_ERR_OR_NULL(*btf))
1201 return PTR_ERR(*btf);
1203 if (ptr->type_id > 0)
1204 *btf_id = ptr->type_id;
1209 t = btf_type_by_id(*btf, *btf_id);
1210 if (*btf_id <= 0 || !t)
1216 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1217 u32, btf_ptr_size, u64, flags)
1219 const struct btf *btf;
1223 ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1227 return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1231 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1232 .func = bpf_snprintf_btf,
1234 .ret_type = RET_INTEGER,
1235 .arg1_type = ARG_PTR_TO_MEM,
1236 .arg2_type = ARG_CONST_SIZE,
1237 .arg3_type = ARG_PTR_TO_MEM,
1238 .arg4_type = ARG_CONST_SIZE,
1239 .arg5_type = ARG_ANYTHING,
1242 const struct bpf_func_proto *
1243 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1246 case BPF_FUNC_map_lookup_elem:
1247 return &bpf_map_lookup_elem_proto;
1248 case BPF_FUNC_map_update_elem:
1249 return &bpf_map_update_elem_proto;
1250 case BPF_FUNC_map_delete_elem:
1251 return &bpf_map_delete_elem_proto;
1252 case BPF_FUNC_map_push_elem:
1253 return &bpf_map_push_elem_proto;
1254 case BPF_FUNC_map_pop_elem:
1255 return &bpf_map_pop_elem_proto;
1256 case BPF_FUNC_map_peek_elem:
1257 return &bpf_map_peek_elem_proto;
1258 case BPF_FUNC_ktime_get_ns:
1259 return &bpf_ktime_get_ns_proto;
1260 case BPF_FUNC_ktime_get_boot_ns:
1261 return &bpf_ktime_get_boot_ns_proto;
1262 case BPF_FUNC_tail_call:
1263 return &bpf_tail_call_proto;
1264 case BPF_FUNC_get_current_pid_tgid:
1265 return &bpf_get_current_pid_tgid_proto;
1266 case BPF_FUNC_get_current_task:
1267 return &bpf_get_current_task_proto;
1268 case BPF_FUNC_get_current_uid_gid:
1269 return &bpf_get_current_uid_gid_proto;
1270 case BPF_FUNC_get_current_comm:
1271 return &bpf_get_current_comm_proto;
1272 case BPF_FUNC_trace_printk:
1273 return bpf_get_trace_printk_proto();
1274 case BPF_FUNC_get_smp_processor_id:
1275 return &bpf_get_smp_processor_id_proto;
1276 case BPF_FUNC_get_numa_node_id:
1277 return &bpf_get_numa_node_id_proto;
1278 case BPF_FUNC_perf_event_read:
1279 return &bpf_perf_event_read_proto;
1280 case BPF_FUNC_probe_write_user:
1281 return bpf_get_probe_write_proto();
1282 case BPF_FUNC_current_task_under_cgroup:
1283 return &bpf_current_task_under_cgroup_proto;
1284 case BPF_FUNC_get_prandom_u32:
1285 return &bpf_get_prandom_u32_proto;
1286 case BPF_FUNC_probe_read_user:
1287 return &bpf_probe_read_user_proto;
1288 case BPF_FUNC_probe_read_kernel:
1289 return &bpf_probe_read_kernel_proto;
1290 case BPF_FUNC_probe_read_user_str:
1291 return &bpf_probe_read_user_str_proto;
1292 case BPF_FUNC_probe_read_kernel_str:
1293 return &bpf_probe_read_kernel_str_proto;
1294 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1295 case BPF_FUNC_probe_read:
1296 return &bpf_probe_read_compat_proto;
1297 case BPF_FUNC_probe_read_str:
1298 return &bpf_probe_read_compat_str_proto;
1300 #ifdef CONFIG_CGROUPS
1301 case BPF_FUNC_get_current_cgroup_id:
1302 return &bpf_get_current_cgroup_id_proto;
1304 case BPF_FUNC_send_signal:
1305 return &bpf_send_signal_proto;
1306 case BPF_FUNC_send_signal_thread:
1307 return &bpf_send_signal_thread_proto;
1308 case BPF_FUNC_perf_event_read_value:
1309 return &bpf_perf_event_read_value_proto;
1310 case BPF_FUNC_get_ns_current_pid_tgid:
1311 return &bpf_get_ns_current_pid_tgid_proto;
1312 case BPF_FUNC_ringbuf_output:
1313 return &bpf_ringbuf_output_proto;
1314 case BPF_FUNC_ringbuf_reserve:
1315 return &bpf_ringbuf_reserve_proto;
1316 case BPF_FUNC_ringbuf_submit:
1317 return &bpf_ringbuf_submit_proto;
1318 case BPF_FUNC_ringbuf_discard:
1319 return &bpf_ringbuf_discard_proto;
1320 case BPF_FUNC_ringbuf_query:
1321 return &bpf_ringbuf_query_proto;
1322 case BPF_FUNC_jiffies64:
1323 return &bpf_jiffies64_proto;
1324 case BPF_FUNC_get_task_stack:
1325 return &bpf_get_task_stack_proto;
1326 case BPF_FUNC_copy_from_user:
1327 return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1328 case BPF_FUNC_snprintf_btf:
1329 return &bpf_snprintf_btf_proto;
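/*
 * bpf_tracing_func_proto() is the common helper set shared by all tracing
 * program types; the kprobe, tracepoint, raw tracepoint and perf_event
 * variants below layer their context-specific helpers on top and fall back
 * here for everything else.
 */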
1335 static const struct bpf_func_proto *
1336 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1339 case BPF_FUNC_perf_event_output:
1340 return &bpf_perf_event_output_proto;
1341 case BPF_FUNC_get_stackid:
1342 return &bpf_get_stackid_proto;
1343 case BPF_FUNC_get_stack:
1344 return &bpf_get_stack_proto;
1345 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1346 case BPF_FUNC_override_return:
1347 return &bpf_override_return_proto;
1350 return bpf_tracing_func_proto(func_id, prog);
1354 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1355 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1356 const struct bpf_prog *prog,
1357 struct bpf_insn_access_aux *info)
1359 if (off < 0 || off >= sizeof(struct pt_regs))
1361 if (type != BPF_READ)
1363 if (off % size != 0)
1366 * Assertion for 32 bit to make sure last 8 byte access
1367 * (BPF_DW) to the last 4 byte member is disallowed.
1369 if (off + size > sizeof(struct pt_regs))
1375 const struct bpf_verifier_ops kprobe_verifier_ops = {
1376 .get_func_proto = kprobe_prog_func_proto,
1377 .is_valid_access = kprobe_prog_is_valid_access,
1380 const struct bpf_prog_ops kprobe_prog_ops = {
1383 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1384 u64, flags, void *, data, u64, size)
1386 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1389 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1390 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1391 * from there and call the same bpf_perf_event_output() helper inline.
1393 return ____bpf_perf_event_output(regs, map, flags, data, size);
1396 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1397 .func = bpf_perf_event_output_tp,
1399 .ret_type = RET_INTEGER,
1400 .arg1_type = ARG_PTR_TO_CTX,
1401 .arg2_type = ARG_CONST_MAP_PTR,
1402 .arg3_type = ARG_ANYTHING,
1403 .arg4_type = ARG_PTR_TO_MEM,
1404 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1407 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1410 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1413 * Same comment as in bpf_perf_event_output_tp(), only that this time
1414 * the other helper's function body cannot be inlined due to being
1415 * external, thus we need to call raw helper function.
1417 return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1421 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1422 .func = bpf_get_stackid_tp,
1424 .ret_type = RET_INTEGER,
1425 .arg1_type = ARG_PTR_TO_CTX,
1426 .arg2_type = ARG_CONST_MAP_PTR,
1427 .arg3_type = ARG_ANYTHING,
1430 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1433 struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1435 return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1436 (unsigned long) size, flags, 0);
1439 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1440 .func = bpf_get_stack_tp,
1442 .ret_type = RET_INTEGER,
1443 .arg1_type = ARG_PTR_TO_CTX,
1444 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1445 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1446 .arg4_type = ARG_ANYTHING,
1449 static const struct bpf_func_proto *
1450 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1453 case BPF_FUNC_perf_event_output:
1454 return &bpf_perf_event_output_proto_tp;
1455 case BPF_FUNC_get_stackid:
1456 return &bpf_get_stackid_proto_tp;
1457 case BPF_FUNC_get_stack:
1458 return &bpf_get_stack_proto_tp;
1460 return bpf_tracing_func_proto(func_id, prog);
1464 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1465 const struct bpf_prog *prog,
1466 struct bpf_insn_access_aux *info)
1468 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1470 if (type != BPF_READ)
1472 if (off % size != 0)
1475 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1479 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1480 .get_func_proto = tp_prog_func_proto,
1481 .is_valid_access = tp_prog_is_valid_access,
1484 const struct bpf_prog_ops tracepoint_prog_ops = {
1487 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1488 struct bpf_perf_event_value *, buf, u32, size)
1492 if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1494 err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1500 memset(buf, 0, size);
1504 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1505 .func = bpf_perf_prog_read_value,
1507 .ret_type = RET_INTEGER,
1508 .arg1_type = ARG_PTR_TO_CTX,
1509 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
1510 .arg3_type = ARG_CONST_SIZE,
1513 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1514 void *, buf, u32, size, u64, flags)
1519 static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1520 struct perf_branch_stack *br_stack = ctx->data->br_stack;
1523 if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1526 if (unlikely(!br_stack))
1529 if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1530 return br_stack->nr * br_entry_size;
1532 if (!buf || (size % br_entry_size != 0))
1535 to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1536 memcpy(buf, br_stack->entries, to_copy);
1542 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1543 .func = bpf_read_branch_records,
1545 .ret_type = RET_INTEGER,
1546 .arg1_type = ARG_PTR_TO_CTX,
1547 .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
1548 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1549 .arg4_type = ARG_ANYTHING,
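/*
 * With BPF_F_GET_BRANCH_RECORDS_SIZE set, bpf_read_branch_records() only
 * reports how many bytes of struct perf_branch_entry records are available;
 * otherwise it copies up to "size" bytes (which must be a multiple of the
 * entry size) from the branch stack into "buf".
 */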
1552 static const struct bpf_func_proto *
1553 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1556 case BPF_FUNC_perf_event_output:
1557 return &bpf_perf_event_output_proto_tp;
1558 case BPF_FUNC_get_stackid:
1559 return &bpf_get_stackid_proto_pe;
1560 case BPF_FUNC_get_stack:
1561 return &bpf_get_stack_proto_pe;
1562 case BPF_FUNC_perf_prog_read_value:
1563 return &bpf_perf_prog_read_value_proto;
1564 case BPF_FUNC_read_branch_records:
1565 return &bpf_read_branch_records_proto;
1567 return bpf_tracing_func_proto(func_id, prog);
1572 * bpf_raw_tp_regs are separate from the bpf_pt_regs used from skb/xdp
1573 * to avoid a potential recursive reuse issue when/if tracepoints are added
1574 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1576 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1577 * in normal, irq, and nmi context.
1579 struct bpf_raw_tp_regs {
1580 struct pt_regs regs[3];
1582 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1583 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1584 static struct pt_regs *get_bpf_raw_tp_regs(void)
1586 struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1587 int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1589 if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1590 this_cpu_dec(bpf_raw_tp_nest_level);
1591 return ERR_PTR(-EBUSY);
1594 return &tp_regs->regs[nest_level - 1];
1597 static void put_bpf_raw_tp_regs(void)
1599 this_cpu_dec(bpf_raw_tp_nest_level);
1602 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1603 struct bpf_map *, map, u64, flags, void *, data, u64, size)
1605 struct pt_regs *regs = get_bpf_raw_tp_regs();
1609 return PTR_ERR(regs);
1611 perf_fetch_caller_regs(regs);
1612 ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1614 put_bpf_raw_tp_regs();
1618 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1619 .func = bpf_perf_event_output_raw_tp,
1621 .ret_type = RET_INTEGER,
1622 .arg1_type = ARG_PTR_TO_CTX,
1623 .arg2_type = ARG_CONST_MAP_PTR,
1624 .arg3_type = ARG_ANYTHING,
1625 .arg4_type = ARG_PTR_TO_MEM,
1626 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
1629 extern const struct bpf_func_proto bpf_skb_output_proto;
1630 extern const struct bpf_func_proto bpf_xdp_output_proto;
1632 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1633 struct bpf_map *, map, u64, flags)
1635 struct pt_regs *regs = get_bpf_raw_tp_regs();
1639 return PTR_ERR(regs);
1641 perf_fetch_caller_regs(regs);
1642 /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1643 ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1645 put_bpf_raw_tp_regs();
1649 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1650 .func = bpf_get_stackid_raw_tp,
1652 .ret_type = RET_INTEGER,
1653 .arg1_type = ARG_PTR_TO_CTX,
1654 .arg2_type = ARG_CONST_MAP_PTR,
1655 .arg3_type = ARG_ANYTHING,
1658 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1659 void *, buf, u32, size, u64, flags)
1661 struct pt_regs *regs = get_bpf_raw_tp_regs();
1665 return PTR_ERR(regs);
1667 perf_fetch_caller_regs(regs);
1668 ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1669 (unsigned long) size, flags, 0);
1670 put_bpf_raw_tp_regs();
1674 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1675 .func = bpf_get_stack_raw_tp,
1677 .ret_type = RET_INTEGER,
1678 .arg1_type = ARG_PTR_TO_CTX,
1679 .arg2_type = ARG_PTR_TO_MEM,
1680 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
1681 .arg4_type = ARG_ANYTHING,
1684 static const struct bpf_func_proto *
1685 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1688 case BPF_FUNC_perf_event_output:
1689 return &bpf_perf_event_output_proto_raw_tp;
1690 case BPF_FUNC_get_stackid:
1691 return &bpf_get_stackid_proto_raw_tp;
1692 case BPF_FUNC_get_stack:
1693 return &bpf_get_stack_proto_raw_tp;
1695 return bpf_tracing_func_proto(func_id, prog);
1699 const struct bpf_func_proto *
1700 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1704 case BPF_FUNC_skb_output:
1705 return &bpf_skb_output_proto;
1706 case BPF_FUNC_xdp_output:
1707 return &bpf_xdp_output_proto;
1708 case BPF_FUNC_skc_to_tcp6_sock:
1709 return &bpf_skc_to_tcp6_sock_proto;
1710 case BPF_FUNC_skc_to_tcp_sock:
1711 return &bpf_skc_to_tcp_sock_proto;
1712 case BPF_FUNC_skc_to_tcp_timewait_sock:
1713 return &bpf_skc_to_tcp_timewait_sock_proto;
1714 case BPF_FUNC_skc_to_tcp_request_sock:
1715 return &bpf_skc_to_tcp_request_sock_proto;
1716 case BPF_FUNC_skc_to_udp6_sock:
1717 return &bpf_skc_to_udp6_sock_proto;
1719 case BPF_FUNC_seq_printf:
1720 return prog->expected_attach_type == BPF_TRACE_ITER ?
1721 &bpf_seq_printf_proto :
1723 case BPF_FUNC_seq_write:
1724 return prog->expected_attach_type == BPF_TRACE_ITER ?
1725 &bpf_seq_write_proto :
1727 case BPF_FUNC_seq_printf_btf:
1728 return prog->expected_attach_type == BPF_TRACE_ITER ?
1729 &bpf_seq_printf_btf_proto :
1731 case BPF_FUNC_d_path:
1732 return &bpf_d_path_proto;
1734 return raw_tp_prog_func_proto(func_id, prog);
1738 static bool raw_tp_prog_is_valid_access(int off, int size,
1739 enum bpf_access_type type,
1740 const struct bpf_prog *prog,
1741 struct bpf_insn_access_aux *info)
1743 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1745 if (type != BPF_READ)
1747 if (off % size != 0)
1752 static bool tracing_prog_is_valid_access(int off, int size,
1753 enum bpf_access_type type,
1754 const struct bpf_prog *prog,
1755 struct bpf_insn_access_aux *info)
1757 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1759 if (type != BPF_READ)
1761 if (off % size != 0)
1763 return btf_ctx_access(off, size, type, prog, info);
1766 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1767 const union bpf_attr *kattr,
1768 union bpf_attr __user *uattr)
1773 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1774 .get_func_proto = raw_tp_prog_func_proto,
1775 .is_valid_access = raw_tp_prog_is_valid_access,
1778 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1779 .test_run = bpf_prog_test_run_raw_tp,
1782 const struct bpf_verifier_ops tracing_verifier_ops = {
1783 .get_func_proto = tracing_prog_func_proto,
1784 .is_valid_access = tracing_prog_is_valid_access,
1787 const struct bpf_prog_ops tracing_prog_ops = {
1788 .test_run = bpf_prog_test_run_tracing,
1791 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1792 enum bpf_access_type type,
1793 const struct bpf_prog *prog,
1794 struct bpf_insn_access_aux *info)
1797 if (size != sizeof(u64) || type != BPF_READ)
1799 info->reg_type = PTR_TO_TP_BUFFER;
1801 return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1804 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1805 .get_func_proto = raw_tp_prog_func_proto,
1806 .is_valid_access = raw_tp_writable_prog_is_valid_access,
1809 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1812 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1813 const struct bpf_prog *prog,
1814 struct bpf_insn_access_aux *info)
1816 const int size_u64 = sizeof(u64);
1818 if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1820 if (type != BPF_READ)
1822 if (off % size != 0) {
1823 if (sizeof(unsigned long) != 4)
1827 if (off % size != 4)
1832 case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1833 bpf_ctx_record_field_size(info, size_u64);
1834 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1837 case bpf_ctx_range(struct bpf_perf_event_data, addr):
1838 bpf_ctx_record_field_size(info, size_u64);
1839 if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1843 if (size != sizeof(long))
1850 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1851 const struct bpf_insn *si,
1852 struct bpf_insn *insn_buf,
1853 struct bpf_prog *prog, u32 *target_size)
1855 struct bpf_insn *insn = insn_buf;
1858 case offsetof(struct bpf_perf_event_data, sample_period):
1859 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1860 data), si->dst_reg, si->src_reg,
1861 offsetof(struct bpf_perf_event_data_kern, data));
1862 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1863 bpf_target_off(struct perf_sample_data, period, 8,
1866 case offsetof(struct bpf_perf_event_data, addr):
1867 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1868 data), si->dst_reg, si->src_reg,
1869 offsetof(struct bpf_perf_event_data_kern, data));
1870 *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1871 bpf_target_off(struct perf_sample_data, addr, 8,
1875 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1876 regs), si->dst_reg, si->src_reg,
1877 offsetof(struct bpf_perf_event_data_kern, regs));
1878 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1883 return insn - insn_buf;
1886 const struct bpf_verifier_ops perf_event_verifier_ops = {
1887 .get_func_proto = pe_prog_func_proto,
1888 .is_valid_access = pe_prog_is_valid_access,
1889 .convert_ctx_access = pe_prog_convert_ctx_access,
1892 const struct bpf_prog_ops perf_event_prog_ops = {
1895 static DEFINE_MUTEX(bpf_event_mutex);
1897 #define BPF_TRACE_MAX_PROGS 64
1899 int perf_event_attach_bpf_prog(struct perf_event *event,
1900 struct bpf_prog *prog)
1902 struct bpf_prog_array *old_array;
1903 struct bpf_prog_array *new_array;
1907 * Kprobe override only works if the probe is on the function entry,
1908 * and only if the target function is on the error-injection opt-in list.
1910 if (prog->kprobe_override &&
1911 (!trace_kprobe_on_func_entry(event->tp_event) ||
1912 !trace_kprobe_error_injectable(event->tp_event)))
1915 mutex_lock(&bpf_event_mutex);
1920 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1922 bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1927 ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1931 /* set the new array to event->tp_event and set event->prog */
1933 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1934 bpf_prog_array_free(old_array);
1937 mutex_unlock(&bpf_event_mutex);
1941 void perf_event_detach_bpf_prog(struct perf_event *event)
1943 struct bpf_prog_array *old_array;
1944 struct bpf_prog_array *new_array;
1947 mutex_lock(&bpf_event_mutex);
1952 old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1953 ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1957 bpf_prog_array_delete_safe(old_array, event->prog);
1959 rcu_assign_pointer(event->tp_event->prog_array, new_array);
1960 bpf_prog_array_free(old_array);
1963 bpf_prog_put(event->prog);
1967 mutex_unlock(&bpf_event_mutex);
1970 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1972 struct perf_event_query_bpf __user *uquery = info;
1973 struct perf_event_query_bpf query = {};
1974 struct bpf_prog_array *progs;
1975 u32 *ids, prog_cnt, ids_len;
1978 if (!perfmon_capable())
1980 if (event->attr.type != PERF_TYPE_TRACEPOINT)
1982 if (copy_from_user(&query, uquery, sizeof(query)))
1985 ids_len = query.ids_len;
1986 if (ids_len > BPF_TRACE_MAX_PROGS)
1988 ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1992 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1993 * is required when user only wants to check for uquery->prog_cnt.
1994 * There is no need to check for it since the case is handled
1995 * gracefully in bpf_prog_array_copy_info.
1998 mutex_lock(&bpf_event_mutex);
1999 progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2000 ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2001 mutex_unlock(&bpf_event_mutex);
2003 if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2004 copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2011 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2012 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2014 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2016 struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2018 for (; btp < __stop__bpf_raw_tp; btp++) {
2019 if (!strcmp(btp->tp->name, name))
2023 return bpf_get_raw_tracepoint_module(name);
2026 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2028 struct module *mod = __module_address((unsigned long)btp);
2034 static __always_inline
2035 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2039 (void) BPF_PROG_RUN(prog, args);
2043 #define UNPACK(...) __VA_ARGS__
2044 #define REPEAT_1(FN, DL, X, ...) FN(X)
2045 #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2046 #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2047 #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2048 #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2049 #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2050 #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2051 #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2052 #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2053 #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2054 #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2055 #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2056 #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
2058 #define SARG(X) u64 arg##X
2059 #define COPY(X) args[X] = arg##X
2061 #define __DL_COM (,)
2062 #define __DL_SEM (;)
2064 #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2066 #define BPF_TRACE_DEFN_x(x) \
2067 void bpf_trace_run##x(struct bpf_prog *prog, \
2068 REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
2071 REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
2072 __bpf_trace_run(prog, args); \
2074 EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2075 BPF_TRACE_DEFN_x(1);
2076 BPF_TRACE_DEFN_x(2);
2077 BPF_TRACE_DEFN_x(3);
2078 BPF_TRACE_DEFN_x(4);
2079 BPF_TRACE_DEFN_x(5);
2080 BPF_TRACE_DEFN_x(6);
2081 BPF_TRACE_DEFN_x(7);
2082 BPF_TRACE_DEFN_x(8);
2083 BPF_TRACE_DEFN_x(9);
2084 BPF_TRACE_DEFN_x(10);
2085 BPF_TRACE_DEFN_x(11);
2086 BPF_TRACE_DEFN_x(12);
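/*
 * The REPEAT()/BPF_TRACE_DEFN_x() machinery above stamps out bpf_trace_run1()
 * through bpf_trace_run12(): each takes the tracepoint arguments as u64s,
 * copies them into a small array and passes that array to the program as its
 * context via __bpf_trace_run(). These are, roughly, the functions that raw
 * tracepoint probes registered through __bpf_probe_register() below end up
 * calling for tracepoints with the corresponding number of arguments.
 */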
2088 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2090 struct tracepoint *tp = btp->tp;
2093 * check that program doesn't access arguments beyond what's
2094 * available in this tracepoint
2096 if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2099 if (prog->aux->max_tp_access > btp->writable_size)
2102 return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
2105 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2107 return __bpf_probe_register(btp, prog);
2110 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2112 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2115 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2116 u32 *fd_type, const char **buf,
2117 u64 *probe_offset, u64 *probe_addr)
2119 bool is_tracepoint, is_syscall_tp;
2120 struct bpf_prog *prog;
2127 /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2128 if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2131 *prog_id = prog->aux->id;
2132 flags = event->tp_event->flags;
2133 is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2134 is_syscall_tp = is_syscall_trace_event(event->tp_event);
2136 if (is_tracepoint || is_syscall_tp) {
2137 *buf = is_tracepoint ? event->tp_event->tp->name
2138 : event->tp_event->name;
2139 *fd_type = BPF_FD_TYPE_TRACEPOINT;
2140 *probe_offset = 0x0;
2145 #ifdef CONFIG_KPROBE_EVENTS
2146 if (flags & TRACE_EVENT_FL_KPROBE)
2147 err = bpf_get_kprobe_info(event, fd_type, buf,
2148 probe_offset, probe_addr,
2149 event->attr.type == PERF_TYPE_TRACEPOINT);
2151 #ifdef CONFIG_UPROBE_EVENTS
2152 if (flags & TRACE_EVENT_FL_UPROBE)
2153 err = bpf_get_uprobe_info(event, fd_type, buf,
2155 event->attr.type == PERF_TYPE_TRACEPOINT);
2162 static int __init send_signal_irq_work_init(void)
2165 struct send_signal_irq_work *work;
2167 for_each_possible_cpu(cpu) {
2168 work = per_cpu_ptr(&send_signal_work, cpu);
2169 init_irq_work(&work->irq_work, do_bpf_send_signal);
2174 subsys_initcall(send_signal_irq_work_init);
2176 #ifdef CONFIG_MODULES
2177 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2180 struct bpf_trace_module *btm, *tmp;
2181 struct module *mod = module;
2183 if (mod->num_bpf_raw_events == 0 ||
2184 (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2187 mutex_lock(&bpf_module_mutex);
2190 case MODULE_STATE_COMING:
2191 btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2193 btm->module = module;
2194 list_add(&btm->list, &bpf_trace_modules);
2197 case MODULE_STATE_GOING:
2198 list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2199 if (btm->module == module) {
2200 list_del(&btm->list);
2208 mutex_unlock(&bpf_module_mutex);
2213 static struct notifier_block bpf_module_nb = {
2214 .notifier_call = bpf_event_notify,
2217 static int __init bpf_event_init(void)
2219 register_module_notifier(&bpf_module_nb);
2223 fs_initcall(bpf_event_init);
2224 #endif /* CONFIG_MODULES */