// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);

	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out of events when it was updated in between this and the
	 * rcu_dereference() which is accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run);

 out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
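
/*
 * Illustrative sketch (not part of this file): a minimal libbpf-style
 * kprobe program whose return value is interpreted as described above.
 * SEC() and the attach point are assumptions of the example, not of
 * this kernel code.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int kprobe_openat(struct pt_regs *ctx)
 *	{
 *		// 0 filters the event out, 1 stores it in the ring buffer
 *		return 1;
 *	}
 */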
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If user takes `dst`
	 * and keys a hash map with it, then semantically identical strings can
	 * occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
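
/*
 * Illustrative sketch (not part of this file): the length returned by
 * bpf_probe_read_kernel_str() can be fed straight into
 * bpf_perf_event_output(), as the comment above suggests. "events" is
 * an assumed BPF_MAP_TYPE_PERF_EVENT_ARRAY in the example program.
 *
 *	char name[64];
 *	long len = bpf_probe_read_kernel_str(name, sizeof(name), kptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */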
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events.  By loading a program
	 * calling bpf_trace_printk() however the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");

	return &bpf_trace_printk_proto;
}
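
/*
 * Illustrative sketch (not part of this file): with at most
 * MAX_TRACE_PRINTK_VARARGS (3) variadic arguments, the formatted
 * output lands in the bpf_trace/bpf_trace_printk event, readable via
 * tracefs trace_pipe.
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */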
#define MAX_SEQ_PRINTF_VARARGS		12

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};
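
/*
 * Illustrative sketch (not part of this file): the seq_* helpers are
 * only handed out to BPF_TRACE_ITER programs (see
 * tracing_prog_func_proto() below), e.g. a task iterator emitting one
 * line per task. SEC("iter/task") and BPF_SEQ_PRINTF() are libbpf-side
 * conventions assumed by the example.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d\n", task->pid);
 *		return 0;
 *	}
 */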
BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
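
/*
 * Illustrative sketch (not part of this file): reading the counter
 * bound to the current CPU's slot of a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * ("counters" is an assumed map name). As noted above, return values
 * in [-22..-2] are ambiguous between counter values and errors.
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *	if ((s64)cnt < 0)
 *		;	// possibly an error code, see comment above
 */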
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
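
/*
 * Illustrative sketch (not part of this file): emitting a sample from
 * a kprobe program to user space. "events" is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY; user space would drain it with a perf
 * buffer reader such as libbpf's perf_buffer__poll().
 *
 *	struct event { __u32 pid; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *	};
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */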
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)

static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_get_current_btf_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func           = bpf_current_task_under_cgroup,
	.gpl_only       = false,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_CONST_MAP_PTR,
	.arg2_type      = ARG_ANYTHING,
};
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
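
/*
 * Illustrative sketch (not part of this file): bpf_send_signal()
 * targets the whole thread group (PIDTYPE_TGID) of the current task,
 * while bpf_send_signal_thread() targets only the current thread
 * (PIDTYPE_PID).
 *
 *	if (bpf_send_signal(SIGUSR1))
 *		;	// -EPERM/-EBUSY/-EINVAL per the checks above
 */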
BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
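
/*
 * Illustrative sketch (not part of this file): pretty-printing a
 * kernel object by BTF type id. In a CO-RE build the id would
 * typically come from bpf_core_type_id_kernel(); that macro and the
 * "task" pointer are assumptions of the example.
 *
 *	struct btf_ptr p = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	char out[256];
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 */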
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	return kp ? (uintptr_t)kp->addr : 0;
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
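
/*
 * Illustrative sketch (not part of this file): the cookie is a u64
 * chosen by user space at attach time (e.g. the bpf_cookie field of
 * libbpf's perf event attach opts), which lets one program tell its
 * attach points apart:
 *
 *	u64 cookie = bpf_get_attach_cookie(ctx);
 */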
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_task_btf:
		return &bpf_get_current_task_btf_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_write_user:
		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
		       NULL : bpf_get_probe_write_proto();
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	case BPF_FUNC_probe_read:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_proto;
	case BPF_FUNC_probe_read_str:
		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
		       NULL : &bpf_probe_read_compat_str_proto;
#endif
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	case BPF_FUNC_send_signal_thread:
		return &bpf_send_signal_thread_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_get_task_stack:
		return &bpf_get_task_stack_proto;
	case BPF_FUNC_copy_from_user:
		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_task_storage_get:
		return &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		return &bpf_task_storage_delete_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_snprintf:
		return &bpf_snprintf_proto;
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_tracing;
	default:
		return bpf_base_func_proto(func_id);
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	case BPF_FUNC_get_func_ip:
		return &bpf_get_func_ip_proto_kprobe;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto  = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_trace;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto  = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	struct perf_branch_stack *br_stack = ctx->data->br_stack;
	u32 to_copy;

	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
		return -EINVAL;

	if (unlikely(!br_stack))
		return -ENOENT;

	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
		return br_stack->nr * br_entry_size;

	if (!buf || (size % br_entry_size != 0))
		return -EINVAL;

	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
	memcpy(buf, br_stack->entries, to_copy);

	return to_copy;
#endif
}

static const struct bpf_func_proto bpf_read_branch_records_proto = {
	.func           = bpf_read_branch_records,
	.gpl_only       = true,
	.ret_type       = RET_INTEGER,
	.arg1_type      = ARG_PTR_TO_CTX,
	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type      = ARG_ANYTHING,
};
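
/*
 * Illustrative sketch (not part of this file): the usual pattern is a
 * size query with BPF_F_GET_BRANCH_RECORDS_SIZE followed by the real
 * read into a buffer whose size is a multiple of
 * sizeof(struct perf_branch_entry):
 *
 *	struct perf_branch_entry ents[64];
 *	long total = bpf_read_branch_records(ctx, NULL, 0,
 *					     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	long n = bpf_read_branch_records(ctx, ents, sizeof(ents), 0);
 */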
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_pe;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_pe;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	case BPF_FUNC_read_branch_records:
		return &bpf_read_branch_records_proto;
	case BPF_FUNC_get_attach_cookie:
		return &bpf_get_attach_cookie_proto_pe;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
extern const struct bpf_func_proto bpf_skb_output_proto;
extern const struct bpf_func_proto bpf_xdp_output_proto;

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return bpf_tracing_func_proto(func_id, prog);
	}
}

const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;

	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_skb_output:
		return &bpf_skb_output_proto;
	case BPF_FUNC_xdp_output:
		return &bpf_xdp_output_proto;
	case BPF_FUNC_skc_to_tcp6_sock:
		return &bpf_skc_to_tcp6_sock_proto;
	case BPF_FUNC_skc_to_tcp_sock:
		return &bpf_skc_to_tcp_sock_proto;
	case BPF_FUNC_skc_to_tcp_timewait_sock:
		return &bpf_skc_to_tcp_timewait_sock_proto;
	case BPF_FUNC_skc_to_tcp_request_sock:
		return &bpf_skc_to_tcp_request_sock_proto;
	case BPF_FUNC_skc_to_udp6_sock:
		return &bpf_skc_to_udp6_sock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_tracing_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_tracing_proto;
	case BPF_FUNC_sock_from_file:
		return &bpf_sock_from_file_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_ptr_cookie_proto;
#endif
	case BPF_FUNC_seq_printf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_proto :
		       NULL;
	case BPF_FUNC_seq_write:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_write_proto :
		       NULL;
	case BPF_FUNC_seq_printf_btf:
		return prog->expected_attach_type == BPF_TRACE_ITER ?
		       &bpf_seq_printf_btf_proto :
		       NULL;
	case BPF_FUNC_d_path:
		return &bpf_d_path_proto;
	default:
		fn = raw_tp_prog_func_proto(func_id, prog);
		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
			fn = bpf_iter_get_func_proto(func_id, prog);
		return fn;
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	return true;
}

static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_prog_test_run_raw_tp,
#endif
};

const struct bpf_verifier_ops tracing_verifier_ops = {
	.get_func_proto  = tracing_prog_func_proto,
	.is_valid_access = tracing_prog_is_valid_access,
};

const struct bpf_prog_ops tracing_prog_ops = {
	.test_run = bpf_prog_test_run_tracing,
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto  = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog,
			       u64 bpf_cookie)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	event->bpf_cookie = bpf_cookie;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!perfmon_capable())
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	cant_sleep();
	rcu_read_lock();
	(void) bpf_prog_run(prog, args);
	rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
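
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */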
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;
	int ret = 0;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		goto out;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		} else {
			ret = -ENOMEM;
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

out:
	return notifier_from_errno(ret);
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */