Merge branch 'omap-for-v4.16/soc' into omap-for-v4.16/fixes
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 40207c2..fc2838a 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
 #include <linux/filter.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
+#include <linux/kprobes.h>
+#include <linux/error-injection.h>
+
+#include "trace_probe.h"
 #include "trace.h"
 
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,23 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 }
 EXPORT_SYMBOL_GPL(trace_call_bpf);
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+       regs_set_return_value(regs, rc);
+       override_function_with_return(regs);
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_override_return_proto = {
+       .func           = bpf_override_return,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+#endif
+
 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
        int ret;
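
For reference, a minimal sketch of how a kprobe BPF program might use the new helper. This assumes CONFIG_BPF_KPROBE_OVERRIDE=y, a libbpf-style build (bpf_helpers.h), and that the attach target is on the error-injection opt-in list; should_failslab() is one such function upstream, but the target and return value here are purely illustrative.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/should_failslab")
int override_should_failslab(struct pt_regs *ctx)
{
	/*
	 * Replace the probed function's return value with -ENOMEM (-12)
	 * and skip its body; only allowed because should_failslab() is
	 * marked error-injectable and the kprobe sits at function entry.
	 */
	bpf_override_return(ctx, -12 /* -ENOMEM */);
	return 0;
}

/* The helper is gpl_only, so a GPL-compatible license is required. */
char _license[] SEC("license") = "GPL";
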
@@ -224,7 +245,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
  */
 #define __BPF_TP_EMIT()        __BPF_ARG3_TP()
 #define __BPF_TP(...)                                                  \
-       __trace_printk(1 /* Fake ip will not be printed. */,            \
+       __trace_printk(0 /* Fake ip */,                                 \
                       fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)                                             \
@@ -556,6 +577,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_get_stackid_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+       case BPF_FUNC_override_return:
+               return &bpf_override_return_proto;
+#endif
        default:
                return tracing_func_proto(func_id);
        }
@@ -773,6 +798,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;
 
+       /*
+        * Kprobe override only works if they are on the function entry,
+        * and only if they are on the opt-in list.
+        */
+       if (prog->kprobe_override &&
+           (!trace_kprobe_on_func_entry(event->tp_event) ||
+            !trace_kprobe_error_injectable(event->tp_event)))
+               return -EINVAL;
+
        mutex_lock(&bpf_event_mutex);
 
        if (event->prog)
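
The opt-in list checked in the hunk above comes from the error-injection infrastructure (<linux/error-injection.h>, newly included at the top of this file). A sketch of how a kernel function would be placed on that list; the function here is hypothetical, while ALLOW_ERROR_INJECTION() is the real macro.

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Hypothetical function whose callers already handle a -errno return. */
static int example_prepare(int arg)
{
	return arg > 0 ? 0 : -EINVAL;
}

/*
 * Opt the function into error injection.  With CONFIG_BPF_KPROBE_OVERRIDE,
 * a BPF program attached to a kprobe at its entry may then rewrite the
 * return value via bpf_override_return().
 */
ALLOW_ERROR_INJECTION(example_prepare, ERRNO);
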
@@ -825,3 +859,26 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 unlock:
        mutex_unlock(&bpf_event_mutex);
 }
+
+int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+       struct perf_event_query_bpf __user *uquery = info;
+       struct perf_event_query_bpf query = {};
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -EINVAL;
+       if (copy_from_user(&query, uquery, sizeof(query)))
+               return -EFAULT;
+
+       mutex_lock(&bpf_event_mutex);
+       ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+                                      uquery->ids,
+                                      query.ids_len,
+                                      &uquery->prog_cnt);
+       mutex_unlock(&bpf_event_mutex);
+
+       return ret;
+}
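
User space reaches this routine through the PERF_EVENT_IOC_QUERY_BPF ioctl on the tracepoint perf event fd. A minimal sketch of the caller side (requires CAP_SYS_ADMIN; perf_fd is assumed to come from perf_event_open() with PERF_TYPE_TRACEPOINT):

#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/*
 * Print the IDs of the BPF programs attached to the tracepoint behind
 * perf_fd.  Returns the number of attached programs, or -1 on error
 * (including -ENOSPC from the kernel when the id buffer is too small).
 */
static int query_attached_progs(int perf_fd)
{
	const __u32 max_ids = 16;
	struct perf_event_query_bpf *query;
	__u32 i;
	int ret;

	query = calloc(1, sizeof(*query) + max_ids * sizeof(__u32));
	if (!query)
		return -1;
	query->ids_len = max_ids;

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query)) {
		free(query);
		return -1;
	}

	for (i = 0; i < query->prog_cnt; i++)
		printf("attached prog id: %u\n", query->ids[i]);

	ret = query->prog_cnt;
	free(query);
	return ret;
}

The kernel side copies at most ids_len IDs, always reports the true count in prog_cnt, and returns -ENOSPC when the supplied buffer is too small.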