// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"
#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL then, we bail out.
	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, you'll skip the prog_array with the risk of missing
	 * out on events when it was updated in between this and the
	 * rcu_dereference(), which is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
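/*
 * For reference: the kprobe and uprobe perf handlers (e.g. kprobe_perf_func()
 * in trace_kprobe.c) call trace_call_bpf() and drop the event when it returns
 * 0, which is how BPF-based filtering of probe events is implemented.
 */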
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif
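/*
 * Note: bpf_override_return() only works on kprobes placed at the entry of
 * functions that opted in via ALLOW_ERROR_INJECTION(); both conditions are
 * enforced in perf_event_attach_bpf_prog() below.
 */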
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func = bpf_probe_read,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
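/*
 * Illustrative use from a BPF program (names are hypothetical), reading a
 * kernel structure field through an unsafe pointer:
 *
 *	u32 pid;
 *	bpf_probe_read(&pid, sizeof(pid), &task->pid);
 *
 * On failure the destination buffer is zeroed above, so programs that ignore
 * the return value never see uninitialized memory.
 */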
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
	if (!access_ok(unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)						\
	__trace_printk(0 /* Fake ip */,				\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
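/*
 * mod[i] records how many 'l' length modifiers the i-th specifier carried:
 * 0 for plain 32-bit specifiers (%d, %u, %x, ...), 1 for %l*, 2 for %ll*.
 * The nested macros above pick the matching cast for each argument, e.g.
 * with "%d %lu %llx" on a 64-bit arch the emitted call passes (u32)arg1 and
 * leaves arg2 and arg3 unchanged.
 */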
	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
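/*
 * Illustrative call from a BPF program (the verifier requires fmt to live on
 * the program stack and fmt_size to cover the terminating NUL):
 *
 *	char fmt[] = "pid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid);
 *
 * The output lands in the ftrace trace buffer, e.g.
 * /sys/kernel/debug/tracing/trace_pipe.
 */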
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};
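/*
 * bpf_perf_event_read_value() below is the preferred interface: it returns
 * the counter through a caller-supplied buffer instead of the return value,
 * so small negative counter values cannot be confused with error codes.
 */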
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (err)
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	return __bpf_perf_event_output(regs, map, flags, sd);
}
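/*
 * bpf_event_output() is the variant used by program types whose context is
 * not a pt_regs (e.g. the skb/xdp flavours of the perf event output helper):
 * meta is copied directly and ctx is pulled in via the ctx_copy callback.
 */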
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func = bpf_probe_read_str,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};
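/*
 * On success the helper returns the string length including the terminating
 * NUL, which callers typically feed straight into bpf_perf_event_output();
 * on failure dst is zeroed, mirroring bpf_probe_read() above.
 */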
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
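/*
 * The irq_work detour exists because group_send_sig_info() takes locks that
 * must not be acquired with IRQs disabled (or from NMI-like contexts), so in
 * that case the signal is delivered later from a safe context instead.
 */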
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
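/*
 * The conversion above rewrites loads from the user-visible struct
 * bpf_perf_event_data into loads from the kernel-side struct
 * bpf_perf_event_data_kern: sample_period and addr become a pointer chase
 * through ->data into struct perf_sample_data, everything else is read from
 * the saved ->regs at the same offset.
 */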
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod = __module_address((unsigned long)btp);

	if (mod)
		module_put(mod);
}
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
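/*
 * For example, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * These are the functions the __bpf_trace_##call thunks generated by
 * include/trace/bpf_probe.h end up calling for raw tracepoints.
 */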
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
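/*
 * Raw tracepoint attach flow, for orientation: the BPF_RAW_TRACEPOINT_OPEN
 * syscall command looks up the bpf_raw_event_map by name (see
 * bpf_get_raw_tracepoint() above) and then calls bpf_probe_register(), which
 * hooks btp->bpf_func - the __bpf_trace_##call thunk - into the tracepoint
 * with the program as its data argument.
 */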
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */