Merge tag 'ktest-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest
[linux-2.6-microblaze.git] / kernel / trace / bpf_trace.c
index 048c655..6c0018a 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 #include <linux/btf_ids.h>
+#include <linux/bpf_lsm.h>
+
+#include <net/bpf_sk_storage.h>
 
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/btf.h>
@@ -113,7 +116,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
         * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
         * to all call sites, we did a bpf_prog_array_valid() there to check
         * whether call->prog_array is empty or not, which is
-        * a heurisitc to speed up execution.
+        * a heuristic to speed up execution.
         *
         * If bpf_prog_array_valid() fetched prog_array was
         * non-NULL, we go into trace_call_bpf() and do the actual
@@ -1032,6 +1035,20 @@ const struct bpf_func_proto bpf_get_current_task_proto = {
        .ret_type       = RET_INTEGER,
 };
 
/*
 * NOTE(review): this span is a unified-diff hunk; every '+' line is an
 * addition to bpf_trace.c.  Comments annotate the added code itself.
 *
 * bpf_get_current_task_btf - BPF helper returning the current task.
 * All BPF helpers return u64, hence the cast of the task_struct pointer
 * through unsigned long.
 */
+BPF_CALL_0(bpf_get_current_task_btf)
+{
+       return (unsigned long) current;
+}
+
/*
 * Single-entry BTF ID list naming "struct task_struct"; referenced by the
 * proto below so the verifier knows the BTF type of the returned pointer.
 */
+BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
+
/*
 * Helper prototype: GPL-only, and the return type is a pointer to a
 * BTF-described object (task_struct) rather than a plain scalar —
 * presumably to allow BTF-based field access from programs; the exact
 * verifier semantics of RET_PTR_TO_BTF_ID live outside this file.
 */
+static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
+       .func           = bpf_get_current_task_btf,
+       .gpl_only       = true,
+       .ret_type       = RET_PTR_TO_BTF_ID,
+       .ret_btf_id     = &bpf_get_current_btf_ids[0],
+};
+
 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1096,7 +1113,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
                        return -EINVAL;
 
                work = this_cpu_ptr(&send_signal_work);
-               if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+               if (irq_work_is_busy(&work->irq_work))
                        return -EBUSY;
 
                /* Add the current task, which is the target of sending signal,
@@ -1174,7 +1191,11 @@ BTF_SET_END(btf_allowlist_d_path)
 
/*
 * NOTE(review): unified-diff hunk — the '-' line is removed, '+' lines
 * replace it; unprefixed lines are unchanged context.
 *
 * bpf_d_path_allowed - gate deciding whether a program may use the
 * bpf_d_path() helper.  The change adds an LSM-specific path: LSM programs
 * are allowed when their attach point is a sleepable LSM hook (per
 * bpf_lsm_is_sleepable_hook(), declared in the newly included
 * linux/bpf_lsm.h).  All other program types keep the original explicit
 * BTF-ID allowlist check, merely re-wrapped across two lines.
 */
 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
 {
-       return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
+       if (prog->type == BPF_PROG_TYPE_LSM)
+               return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
+
+       return btf_id_set_contains(&btf_allowlist_d_path,
+                                  prog->aux->attach_btf_id);
 }
 
 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
@@ -1269,12 +1290,16 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_ktime_get_boot_ns:
                return &bpf_ktime_get_boot_ns_proto;
+       case BPF_FUNC_ktime_get_coarse_ns:
+               return &bpf_ktime_get_coarse_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_current_pid_tgid:
                return &bpf_get_current_pid_tgid_proto;
        case BPF_FUNC_get_current_task:
                return &bpf_get_current_task_proto;
+       case BPF_FUNC_get_current_task_btf:
+               return &bpf_get_current_task_btf_proto;
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_current_comm:
@@ -1337,9 +1362,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
-       case BPF_FUNC_bpf_per_cpu_ptr:
+       case BPF_FUNC_per_cpu_ptr:
                return &bpf_per_cpu_ptr_proto;
-       case BPF_FUNC_bpf_this_cpu_ptr:
+       case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        default:
                return NULL;
@@ -1729,6 +1754,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_skc_to_tcp_request_sock_proto;
        case BPF_FUNC_skc_to_udp6_sock:
                return &bpf_skc_to_udp6_sock_proto;
+       case BPF_FUNC_sk_storage_get:
+               return &bpf_sk_storage_get_tracing_proto;
+       case BPF_FUNC_sk_storage_delete:
+               return &bpf_sk_storage_delete_tracing_proto;
+       case BPF_FUNC_sock_from_file:
+               return &bpf_sock_from_file_proto;
 #endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
@@ -2041,10 +2072,12 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
 
/*
 * NOTE(review): unified-diff hunk — '-' lines removed, '+' lines added.
 *
 * bpf_put_raw_tracepoint - drop the module reference taken for a raw
 * tracepoint.  The change holds preemption off across the
 * __module_address() lookup and the module_put() — presumably
 * __module_address() requires a non-preemptible context so the mapping
 * from address to module stays valid while it is used; confirm against
 * the __module_address() kernel-doc.
 *
 * NOTE(review): the old explicit "if (mod)" guard is dropped, so this now
 * relies on module_put() tolerating a NULL argument (btp not inside a
 * module) — verify that module_put(NULL) is a no-op in this kernel.
 */
 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
 {
-       struct module *mod = __module_address((unsigned long)btp);
+       struct module *mod;
 
-       if (mod)
-               module_put(mod);
+       preempt_disable();
+       mod = __module_address((unsigned long)btp);
+       module_put(mod);
+       preempt_enable();
 }
 
 static __always_inline