bpf: Allow some trace helpers for all prog types
author	Feng Yang <yangfeng@kylinos.cn>
	Tue, 6 May 2025 06:14:33 +0000 (14:14 +0800)
committer	Andrii Nakryiko <andrii@kernel.org>
	Fri, 9 May 2025 17:37:10 +0000 (10:37 -0700)
If a helper works under NMI and doesn't rely on any context-dependent state,
it should be fine to expose it to any program type. The detailed discussion is in [1].

[1] https://lore.kernel.org/all/CAEf4Bza6gK3dsrTosk6k3oZgtHesNDSrDd8sdeQ-GiS6oJixQg@mail.gmail.com/
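
For illustration only (not part of this patch), a minimal sketch of what the
change enables: a cgroup_skb program calling bpf_get_current_comm(), which was
previously offered only to tracing programs and a few cgroup hook types and is
now resolved through bpf_base_func_proto(). The section name, the program
logic, and the CAP_PERFMON note in the comments are assumptions made for the
example, not something stated by this commit.

  /* Hypothetical example; assumes a loader with CAP_PERFMON, since
   * bpf_get_current_comm() sits in the perfmon-gated part of
   * bpf_base_func_proto().
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  char _license[] SEC("license") = "GPL";

  SEC("cgroup_skb/egress")
  int log_egress_comm(struct __sk_buff *skb)
  {
          char comm[16] = {};

          /* On egress this usually runs in the sending task's context,
           * so comm names the process generating the traffic.
           */
          if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
                  bpf_printk("egress %u bytes from %s", skb->len, comm);

          return 1;       /* 1 = allow the packet */
  }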

Suggested-by: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Signed-off-by: Feng Yang <yangfeng@kylinos.cn>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/bpf/20250506061434.94277-2-yangfeng59949@163.com
include/linux/bpf-cgroup.h
kernel/bpf/cgroup.c
kernel/bpf/helpers.c
kernel/trace/bpf_trace.c
net/core/filter.c

include/linux/bpf-cgroup.h
index 9de7adb..4847dca 100644
@@ -427,8 +427,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
 
 const struct bpf_func_proto *
 cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
-const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
 #else
 
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
@@ -465,12 +463,6 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        return NULL;
 }
 
-static inline const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
-{
-       return NULL;
-}
-
 static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
 static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
kernel/bpf/cgroup.c
index 84f58f3..62a1d8d 100644
@@ -1653,10 +1653,6 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        if (func_proto)
                return func_proto;
 
-       func_proto = cgroup_current_func_proto(func_id, prog);
-       if (func_proto)
-               return func_proto;
-
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_event_output_data_proto;
@@ -2204,10 +2200,6 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        if (func_proto)
                return func_proto;
 
-       func_proto = cgroup_current_func_proto(func_id, prog);
-       if (func_proto)
-               return func_proto;
-
        switch (func_id) {
        case BPF_FUNC_sysctl_get_name:
                return &bpf_sysctl_get_name_proto;
@@ -2351,10 +2343,6 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        if (func_proto)
                return func_proto;
 
-       func_proto = cgroup_current_func_proto(func_id, prog);
-       if (func_proto)
-               return func_proto;
-
        switch (func_id) {
 #ifdef CONFIG_NET
        case BPF_FUNC_get_netns_cookie:
@@ -2601,23 +2589,3 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return NULL;
        }
 }
-
-/* Common helpers for cgroup hooks with valid process context. */
-const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
-{
-       switch (func_id) {
-       case BPF_FUNC_get_current_uid_gid:
-               return &bpf_get_current_uid_gid_proto;
-       case BPF_FUNC_get_current_comm:
-               return &bpf_get_current_comm_proto;
-#ifdef CONFIG_CGROUP_NET_CLASSID
-       case BPF_FUNC_get_cgroup_classid:
-               return &bpf_get_cgroup_classid_curr_proto;
-#endif
-       case BPF_FUNC_current_task_under_cgroup:
-               return &bpf_current_task_under_cgroup_proto;
-       default:
-               return NULL;
-       }
-}
kernel/bpf/helpers.c
index 78cefb4..fed53da 100644
@@ -23,6 +23,7 @@
 #include <linux/btf_ids.h>
 #include <linux/bpf_mem_alloc.h>
 #include <linux/kasan.h>
+#include <linux/bpf_verifier.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -1912,6 +1913,12 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
+const struct bpf_func_proto bpf_perf_event_read_proto __weak;
+const struct bpf_func_proto bpf_send_signal_proto __weak;
+const struct bpf_func_proto bpf_send_signal_thread_proto __weak;
+const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak;
+const struct bpf_func_proto bpf_get_task_stack_proto __weak;
+const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak;
 
 const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
@@ -1965,6 +1972,8 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_current_pid_tgid_proto;
        case BPF_FUNC_get_ns_current_pid_tgid:
                return &bpf_get_ns_current_pid_tgid_proto;
+       case BPF_FUNC_get_current_uid_gid:
+               return &bpf_get_current_uid_gid_proto;
        default:
                break;
        }
@@ -2022,7 +2031,21 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_current_cgroup_id_proto;
        case BPF_FUNC_get_current_ancestor_cgroup_id:
                return &bpf_get_current_ancestor_cgroup_id_proto;
+       case BPF_FUNC_current_task_under_cgroup:
+               return &bpf_current_task_under_cgroup_proto;
 #endif
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       case BPF_FUNC_get_cgroup_classid:
+               return &bpf_get_cgroup_classid_curr_proto;
+#endif
+       case BPF_FUNC_task_storage_get:
+               if (bpf_prog_check_recur(prog))
+                       return &bpf_task_storage_get_recur_proto;
+               return &bpf_task_storage_get_proto;
+       case BPF_FUNC_task_storage_delete:
+               if (bpf_prog_check_recur(prog))
+                       return &bpf_task_storage_delete_recur_proto;
+               return &bpf_task_storage_delete_proto;
        default:
                break;
        }
@@ -2037,6 +2060,8 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_current_task_proto;
        case BPF_FUNC_get_current_task_btf:
                return &bpf_get_current_task_btf_proto;
+       case BPF_FUNC_get_current_comm:
+               return &bpf_get_current_comm_proto;
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
@@ -2047,6 +2072,10 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_probe_read_kernel_str:
                return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
+       case BPF_FUNC_copy_from_user:
+               return &bpf_copy_from_user_proto;
+       case BPF_FUNC_copy_from_user_task:
+               return &bpf_copy_from_user_task_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_snprintf:
@@ -2057,6 +2086,19 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return bpf_get_trace_vprintk_proto();
        case BPF_FUNC_perf_event_read_value:
                return bpf_get_perf_event_read_value_proto();
+       case BPF_FUNC_perf_event_read:
+               return &bpf_perf_event_read_proto;
+       case BPF_FUNC_send_signal:
+               return &bpf_send_signal_proto;
+       case BPF_FUNC_send_signal_thread:
+               return &bpf_send_signal_thread_proto;
+       case BPF_FUNC_get_task_stack:
+               return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
+                                      : &bpf_get_task_stack_proto;
+       case BPF_FUNC_get_branch_snapshot:
+               return &bpf_get_branch_snapshot_proto;
+       case BPF_FUNC_find_vma:
+               return &bpf_find_vma_proto;
        default:
                return NULL;
        }
kernel/trace/bpf_trace.c
index 52c432a..8689209 100644
@@ -572,7 +572,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
        return value;
 }
 
-static const struct bpf_func_proto bpf_perf_event_read_proto = {
+const struct bpf_func_proto bpf_perf_event_read_proto = {
        .func           = bpf_perf_event_read,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
@@ -882,7 +882,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
        return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
 }
 
-static const struct bpf_func_proto bpf_send_signal_proto = {
+const struct bpf_func_proto bpf_send_signal_proto = {
        .func           = bpf_send_signal,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -894,7 +894,7 @@ BPF_CALL_1(bpf_send_signal_thread, u32, sig)
        return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
 }
 
-static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+const struct bpf_func_proto bpf_send_signal_thread_proto = {
        .func           = bpf_send_signal_thread,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1185,7 +1185,7 @@ BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
        return entry_cnt * br_entry_size;
 }
 
-static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
+const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
        .func           = bpf_get_branch_snapshot,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
@@ -1430,14 +1430,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        const struct bpf_func_proto *func_proto;
 
        switch (func_id) {
-       case BPF_FUNC_get_current_uid_gid:
-               return &bpf_get_current_uid_gid_proto;
-       case BPF_FUNC_get_current_comm:
-               return &bpf_get_current_comm_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
-       case BPF_FUNC_perf_event_read:
-               return &bpf_perf_event_read_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
                return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
@@ -1446,35 +1440,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
                       NULL : &bpf_probe_read_compat_str_proto;
 #endif
-#ifdef CONFIG_CGROUPS
-       case BPF_FUNC_current_task_under_cgroup:
-               return &bpf_current_task_under_cgroup_proto;
-#endif
-       case BPF_FUNC_send_signal:
-               return &bpf_send_signal_proto;
-       case BPF_FUNC_send_signal_thread:
-               return &bpf_send_signal_thread_proto;
-       case BPF_FUNC_get_task_stack:
-               return prog->sleepable ? &bpf_get_task_stack_sleepable_proto
-                                      : &bpf_get_task_stack_proto;
-       case BPF_FUNC_copy_from_user:
-               return &bpf_copy_from_user_proto;
-       case BPF_FUNC_copy_from_user_task:
-               return &bpf_copy_from_user_task_proto;
-       case BPF_FUNC_task_storage_get:
-               if (bpf_prog_check_recur(prog))
-                       return &bpf_task_storage_get_recur_proto;
-               return &bpf_task_storage_get_proto;
-       case BPF_FUNC_task_storage_delete:
-               if (bpf_prog_check_recur(prog))
-                       return &bpf_task_storage_delete_recur_proto;
-               return &bpf_task_storage_delete_proto;
        case BPF_FUNC_get_func_ip:
                return &bpf_get_func_ip_proto_tracing;
-       case BPF_FUNC_get_branch_snapshot:
-               return &bpf_get_branch_snapshot_proto;
-       case BPF_FUNC_find_vma:
-               return &bpf_find_vma_proto;
        default:
                break;
        }
net/core/filter.c
index 79cab4d..30e7d36 100644
@@ -8022,10 +8022,6 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        if (func_proto)
                return func_proto;
 
-       func_proto = cgroup_current_func_proto(func_id, prog);
-       if (func_proto)
-               return func_proto;
-
        switch (func_id) {
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_cookie_sock_proto;
@@ -8051,10 +8047,6 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        if (func_proto)
                return func_proto;
 
-       func_proto = cgroup_current_func_proto(func_id, prog);
-       if (func_proto)
-               return func_proto;
-
        switch (func_id) {
        case BPF_FUNC_bind:
                switch (prog->expected_attach_type) {
@@ -8488,18 +8480,12 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_msg_pop_data_proto;
        case BPF_FUNC_perf_event_output:
                return &bpf_event_output_data_proto;
-       case BPF_FUNC_get_current_uid_gid:
-               return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_proto;
        case BPF_FUNC_sk_storage_delete:
                return &bpf_sk_storage_delete_proto;
        case BPF_FUNC_get_netns_cookie:
                return &bpf_get_netns_cookie_sk_msg_proto;
-#ifdef CONFIG_CGROUP_NET_CLASSID
-       case BPF_FUNC_get_cgroup_classid:
-               return &bpf_get_cgroup_classid_curr_proto;
-#endif
        default:
                return bpf_sk_base_func_proto(func_id, prog);
        }