perf: Ensure bpf_perf_link path is properly serialized
author    Peter Zijlstra <peterz@infradead.org>
          Fri, 17 Jan 2025 09:54:50 +0000 (10:54 +0100)
committer Peter Zijlstra <peterz@infradead.org>
          Tue, 8 Apr 2025 18:55:46 +0000 (20:55 +0200)
Ravi reported that the bpf_perf_link_attach() usage of
perf_event_set_bpf_prog() is not serialized by ctx->mutex, unlike the
PERF_EVENT_IOC_SET_BPF case.

Fix this by turning perf_event_set_bpf_prog() into a wrapper that takes
ctx->mutex via perf_event_ctx_lock() around a new
__perf_event_set_bpf_prog(); the ioctl path, which is already serialized,
calls the inner function directly.
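
For context, the two paths that reach the attach code look roughly like
this (abbreviated sketch, not the exact kernel source; security checks
and error handling omitted):

	/* Path 1: ioctl(PERF_EVENT_IOC_SET_BPF) -- already serialized.
	 * perf_ioctl() takes ctx->mutex through perf_event_ctx_lock()
	 * before dispatching to _perf_ioctl(), so the BPF attach runs
	 * under the lock.
	 */
	static long perf_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
	{
		struct perf_event *event = file->private_data;
		struct perf_event_context *ctx;
		long ret;

		ctx = perf_event_ctx_lock(event);
		ret = _perf_ioctl(event, cmd, arg); /* -> __perf_event_set_bpf_prog() */
		perf_event_ctx_unlock(event, ctx);

		return ret;
	}

	/* Path 2: bpf(BPF_LINK_CREATE) -> bpf_perf_link_attach(), which
	 * calls perf_event_set_bpf_prog() without ctx->mutex held.  The
	 * wrapper added below now takes the lock itself, closing the gap.
	 */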

Reported-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lkml.kernel.org/r/20250307193305.486326750@infradead.org
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e93c195..a85d63b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6239,6 +6239,9 @@ static int perf_event_set_output(struct perf_event *event,
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 static int perf_copy_attr(struct perf_event_attr __user *uattr,
                          struct perf_event_attr *attr);
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie);
 
 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
@@ -6301,7 +6304,7 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
                if (IS_ERR(prog))
                        return PTR_ERR(prog);
 
-               err = perf_event_set_bpf_prog(event, prog, 0);
+               err = __perf_event_set_bpf_prog(event, prog, 0);
                if (err) {
                        bpf_prog_put(prog);
                        return err;
@@ -11069,8 +11072,9 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
        return false;
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
-                           u64 bpf_cookie)
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie)
 {
        bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
 
@@ -11108,6 +11112,20 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
        return perf_event_attach_bpf_prog(event, prog, bpf_cookie);
 }
 
+int perf_event_set_bpf_prog(struct perf_event *event,
+                           struct bpf_prog *prog,
+                           u64 bpf_cookie)
+{
+       struct perf_event_context *ctx;
+       int ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = __perf_event_set_bpf_prog(event, prog, bpf_cookie);
+       perf_event_ctx_unlock(event, ctx);
+
+       return ret;
+}
+
 void perf_event_free_bpf_prog(struct perf_event *event)
 {
        if (!event->prog)
@@ -11130,7 +11148,15 @@ static void perf_event_free_filter(struct perf_event *event)
 {
 }
 
-int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
+static int __perf_event_set_bpf_prog(struct perf_event *event,
+                                    struct bpf_prog *prog,
+                                    u64 bpf_cookie)
+{
+       return -ENOENT;
+}
+
+int perf_event_set_bpf_prog(struct perf_event *event,
+                           struct bpf_prog *prog,
                            u64 bpf_cookie)
 {
        return -ENOENT;
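
The split follows the usual kernel pattern of a double-underscore inner
function that assumes the lock is held and a public wrapper that takes
it: __perf_event_set_bpf_prog() expects ctx->mutex, while
perf_event_set_bpf_prog() acquires it. The stubs in the last hunk (the
branch built without CONFIG_EVENT_TRACING) keep both symbols defined so
every configuration continues to link. Callers need no changes; a
simplified sketch of the bpf_perf_link path in kernel/bpf/syscall.c
(abbreviated, error handling omitted):

	/* bpf_perf_link_attach() keeps calling the public entry point;
	 * it is now serialized against ctx->mutex inside the wrapper,
	 * matching the PERF_EVENT_IOC_SET_BPF ioctl path.
	 */
	err = perf_event_set_bpf_prog(event, prog,
				      attr->link_create.perf_event.bpf_cookie);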