// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
#define MAX_KPROBE_CMDLINE_SIZE 1024
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
static bool kprobe_boot_events_enabled __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
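
/*
 * Illustrative boot-time usage (not part of the original source; format as
 * documented for the "kprobe_event=" parameter): probe definitions are
 * separated by ';' and use ',' instead of spaces, e.g.
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2
 *
 * setup_boot_kprobe_events() below converts the commas back to spaces
 * before running each command.
 */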
static int trace_kprobe_create(int argc, const char **argv);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};
/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};
static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}
/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
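
/*
 * Illustrative usage of the iterator above (not from the original source;
 * the dyn_event list must be walked under event_mutex, and
 * handle_one_probe() is a hypothetical helper):
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		handle_one_probe(tk);
 */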
#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}
106 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
109 int len = strlen(mod->name);
110 const char *name = trace_kprobe_symbol(tk);
111 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
114 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
121 p = strchr(tk->symbol, ':');
125 mutex_lock(&module_mutex);
126 ret = !!find_module(tk->symbol);
127 mutex_unlock(&module_mutex);
133 static bool trace_kprobe_is_busy(struct dyn_event *ev)
135 struct trace_kprobe *tk = to_trace_kprobe(ev);
137 return trace_probe_is_enabled(&tk->tp);
140 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
141 int argc, const char **argv)
143 char buf[MAX_ARGSTR_LEN + 1];
149 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
150 else if (tk->rp.kp.offset)
151 snprintf(buf, sizeof(buf), "%s+%u",
152 trace_kprobe_symbol(tk), tk->rp.kp.offset);
154 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
155 if (strcmp(buf, argv[0]))
159 return trace_probe_match_command_args(&tk->tp, argc, argv);
162 static bool trace_kprobe_match(const char *system, const char *event,
163 int argc, const char **argv, struct dyn_event *ev)
165 struct trace_kprobe *tk = to_trace_kprobe(ev);
167 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
168 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
169 trace_kprobe_match_command_head(tk, argc, argv);
172 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
174 unsigned long nhit = 0;
177 for_each_possible_cpu(cpu)
178 nhit += *per_cpu_ptr(tk->nhit, cpu);
183 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
185 return !(list_empty(&tk->rp.kp.list) &&
186 hlist_unhashed(&tk->rp.kp.hlist));
189 /* Return 0 if it fails to find the symbol address */
190 static nokprobe_inline
191 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
196 addr = (unsigned long)
197 kallsyms_lookup_name(trace_kprobe_symbol(tk));
199 addr += tk->rp.kp.offset;
201 addr = (unsigned long)tk->rp.kp.addr;
206 static nokprobe_inline struct trace_kprobe *
207 trace_kprobe_primary_from_call(struct trace_event_call *call)
209 struct trace_probe *tp;
211 tp = trace_probe_primary_from_call(call);
212 if (WARN_ON_ONCE(!tp))
215 return container_of(tp, struct trace_kprobe, tp);
218 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
220 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
222 return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
223 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
224 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
227 bool trace_kprobe_error_injectable(struct trace_event_call *call)
229 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
231 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
235 static int register_kprobe_event(struct trace_kprobe *tk);
236 static int unregister_kprobe_event(struct trace_kprobe *tk);
238 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
239 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
240 struct pt_regs *regs);
242 static void free_trace_kprobe(struct trace_kprobe *tk)
245 trace_probe_cleanup(&tk->tp);
247 free_percpu(tk->nhit);
253 * Allocate new trace_probe and initialize it (including kprobes).
255 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
261 int nargs, bool is_return)
263 struct trace_kprobe *tk;
266 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
270 tk->nhit = alloc_percpu(unsigned long);
275 tk->symbol = kstrdup(symbol, GFP_KERNEL);
278 tk->rp.kp.symbol_name = tk->symbol;
279 tk->rp.kp.offset = offs;
281 tk->rp.kp.addr = addr;
284 tk->rp.handler = kretprobe_dispatcher;
286 tk->rp.kp.pre_handler = kprobe_dispatcher;
288 tk->rp.maxactive = maxactive;
289 INIT_HLIST_NODE(&tk->rp.kp.hlist);
290 INIT_LIST_HEAD(&tk->rp.kp.list);
292 ret = trace_probe_init(&tk->tp, event, group);
296 dyn_event_init(&tk->devent, &trace_kprobe_ops);
299 free_trace_kprobe(tk);
303 static struct trace_kprobe *find_trace_kprobe(const char *event,
306 struct dyn_event *pos;
307 struct trace_kprobe *tk;
309 for_each_trace_kprobe(tk, pos)
310 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
311 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
316 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
320 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
321 if (trace_kprobe_is_return(tk))
322 ret = enable_kretprobe(&tk->rp);
324 ret = enable_kprobe(&tk->rp.kp);
330 static void __disable_trace_kprobe(struct trace_probe *tp)
332 struct trace_probe *pos;
333 struct trace_kprobe *tk;
335 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
336 tk = container_of(pos, struct trace_kprobe, tp);
337 if (!trace_kprobe_is_registered(tk))
339 if (trace_kprobe_is_return(tk))
340 disable_kretprobe(&tk->rp);
342 disable_kprobe(&tk->rp.kp);
/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
350 static int enable_trace_kprobe(struct trace_event_call *call,
351 struct trace_event_file *file)
353 struct trace_probe *pos, *tp;
354 struct trace_kprobe *tk;
358 tp = trace_probe_primary_from_call(call);
359 if (WARN_ON_ONCE(!tp))
361 enabled = trace_probe_is_enabled(tp);
363 /* This also changes "enabled" state */
365 ret = trace_probe_add_file(tp, file);
369 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
374 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
375 tk = container_of(pos, struct trace_kprobe, tp);
376 if (trace_kprobe_has_gone(tk))
378 ret = __enable_trace_kprobe(tk);
385 /* Failed to enable one of them. Roll back all */
387 __disable_trace_kprobe(tp);
389 trace_probe_remove_file(tp, file);
391 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
401 static int disable_trace_kprobe(struct trace_event_call *call,
402 struct trace_event_file *file)
404 struct trace_probe *tp;
406 tp = trace_probe_primary_from_call(call);
407 if (WARN_ON_ONCE(!tp))
411 if (!trace_probe_get_file_link(tp, file))
413 if (!trace_probe_has_single_file(tp))
415 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
417 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
419 if (!trace_probe_is_enabled(tp))
420 __disable_trace_kprobe(tp);
	/*
	 * Synchronization is done in the function below. For perf events,
	 * file == NULL and perf_trace_event_unreg() calls
	 * tracepoint_synchronize_unregister() to ensure the event is
	 * synchronized, so we don't need to care about it here.
	 */
430 trace_probe_remove_file(tp, file);
435 #if defined(CONFIG_KPROBES_ON_FTRACE) && \
436 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long offset, size, addr;

	addr = trace_kprobe_address(tk);
	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does an inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}
#else
#define within_notrace_func(tk)	(false)
#endif
458 /* Internal register function - just handle k*probes and flags */
459 static int __register_trace_kprobe(struct trace_kprobe *tk)
463 if (trace_kprobe_is_registered(tk))
466 if (within_notrace_func(tk)) {
467 pr_warn("Could not probe notrace function %s\n",
468 trace_kprobe_symbol(tk));
472 for (i = 0; i < tk->tp.nr_args; i++) {
473 ret = traceprobe_update_arg(&tk->tp.args[i]);
478 /* Set/clear disabled flag according to tp->flag */
479 if (trace_probe_is_enabled(&tk->tp))
480 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
482 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
484 if (trace_kprobe_is_return(tk))
485 ret = register_kretprobe(&tk->rp);
487 ret = register_kprobe(&tk->rp.kp);
492 /* Internal unregister function - just handle k*probes and flags */
493 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
495 if (trace_kprobe_is_registered(tk)) {
496 if (trace_kprobe_is_return(tk))
497 unregister_kretprobe(&tk->rp);
499 unregister_kprobe(&tk->rp.kp);
500 /* Cleanup kprobe for reuse and mark it unregistered */
501 INIT_HLIST_NODE(&tk->rp.kp.hlist);
502 INIT_LIST_HEAD(&tk->rp.kp.list);
503 if (tk->rp.kp.symbol_name)
504 tk->rp.kp.addr = NULL;
508 /* Unregister a trace_probe and probe_event */
509 static int unregister_trace_kprobe(struct trace_kprobe *tk)
511 /* If other probes are on the event, just unregister kprobe */
512 if (trace_probe_has_sibling(&tk->tp))
	/* An enabled event cannot be unregistered */
516 if (trace_probe_is_enabled(&tk->tp))
519 /* Will fail if probe is being used by ftrace or perf */
520 if (unregister_kprobe_event(tk))
524 __unregister_trace_kprobe(tk);
525 dyn_event_remove(&tk->devent);
526 trace_probe_unlink(&tk->tp);
531 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
532 struct trace_kprobe *comp)
534 struct trace_probe_event *tpe = orig->tp.event;
535 struct trace_probe *pos;
538 list_for_each_entry(pos, &tpe->probes, list) {
539 orig = container_of(pos, struct trace_kprobe, tp);
540 if (strcmp(trace_kprobe_symbol(orig),
541 trace_kprobe_symbol(comp)) ||
542 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument's name and type are the same. Let's compare comm.
		 */
549 for (i = 0; i < orig->tp.nr_args; i++) {
550 if (strcmp(orig->tp.args[i].comm,
551 comp->tp.args[i].comm))
555 if (i == orig->tp.nr_args)
562 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
566 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
		/* Note that arguments start at index 2 */
569 trace_probe_log_set_index(ret + 1);
570 trace_probe_log_err(0, DIFF_ARG_TYPE);
573 if (trace_kprobe_has_same_kprobe(to, tk)) {
574 trace_probe_log_set_index(0);
575 trace_probe_log_err(0, SAME_PROBE);
579 /* Append to existing event */
580 ret = trace_probe_append(&tk->tp, &to->tp);
584 /* Register k*probe */
585 ret = __register_trace_kprobe(tk);
586 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
592 trace_probe_unlink(&tk->tp);
594 dyn_event_add(&tk->devent);
599 /* Register a trace_probe and probe_event */
600 static int register_trace_kprobe(struct trace_kprobe *tk)
602 struct trace_kprobe *old_tk;
605 mutex_lock(&event_mutex);
607 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
608 trace_probe_group_name(&tk->tp));
610 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
611 trace_probe_log_set_index(0);
612 trace_probe_log_err(0, DIFF_PROBE_TYPE);
615 ret = append_trace_kprobe(tk, old_tk);
620 /* Register new event */
621 ret = register_kprobe_event(tk);
623 pr_warn("Failed to register probe event(%d)\n", ret);
627 /* Register k*probe */
628 ret = __register_trace_kprobe(tk);
629 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
635 unregister_kprobe_event(tk);
637 dyn_event_add(&tk->devent);
640 mutex_unlock(&event_mutex);
/* Module notifier callback, checking events on the module */
645 static int trace_kprobe_module_callback(struct notifier_block *nb,
646 unsigned long val, void *data)
648 struct module *mod = data;
649 struct dyn_event *pos;
650 struct trace_kprobe *tk;
653 if (val != MODULE_STATE_COMING)
656 /* Update probes on coming module */
657 mutex_lock(&event_mutex);
658 for_each_trace_kprobe(tk, pos) {
659 if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy; this probe should already have been marked gone. */
661 __unregister_trace_kprobe(tk);
662 ret = __register_trace_kprobe(tk);
664 pr_warn("Failed to re-register probe %s on %s: %d\n",
665 trace_probe_name(&tk->tp),
669 mutex_unlock(&event_mutex);
674 static struct notifier_block trace_kprobe_module_nb = {
675 .notifier_call = trace_kprobe_module_callback,
676 .priority = 1 /* Invoked after kprobe module callback */
/* Convert certain expected characters (':' and '.') into '_' when generating event names */
680 static inline void sanitize_event_name(char *name)
682 while (*name++ != '\0')
683 if (*name == ':' || *name == '.')
static int trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as an alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
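	/*
	 * Illustrative tracefs usage (not part of the original comment; the
	 * register names are x86 examples taken from the kprobetrace
	 * documentation):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' >> kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
	 */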
710 struct trace_kprobe *tk = NULL;
712 bool is_return = false;
713 char *symbol = NULL, *tmp = NULL;
714 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
718 char buf[MAX_EVENT_NAME_LEN];
719 unsigned int flags = TPARG_FL_KERNEL;
721 switch (argv[0][0]) {
724 flags |= TPARG_FL_RETURN;
734 trace_probe_log_init("trace_kprobe", argc, argv);
736 event = strchr(&argv[0][1], ':');
740 if (isdigit(argv[0][1])) {
742 trace_probe_log_err(1, MAXACT_NO_KPROBE);
746 len = event - &argv[0][1] - 1;
748 len = strlen(&argv[0][1]);
749 if (len > MAX_EVENT_NAME_LEN - 1) {
750 trace_probe_log_err(1, BAD_MAXACT);
753 memcpy(buf, &argv[0][1], len);
755 ret = kstrtouint(buf, 0, &maxactive);
756 if (ret || !maxactive) {
757 trace_probe_log_err(1, BAD_MAXACT);
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
771 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
772 trace_probe_log_set_index(1);
773 /* Check whether uprobe event specified */
774 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
778 /* a symbol specified */
779 symbol = kstrdup(argv[1], GFP_KERNEL);
782 /* TODO: support .init module functions */
783 ret = traceprobe_split_symbol_offset(symbol, &offset);
784 if (ret || offset < 0 || offset > UINT_MAX) {
785 trace_probe_log_err(0, BAD_PROBE_ADDR);
788 if (kprobe_on_func_entry(NULL, symbol, offset))
789 flags |= TPARG_FL_FENTRY;
790 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
791 trace_probe_log_err(0, BAD_RETPROBE);
796 trace_probe_log_set_index(0);
798 ret = traceprobe_parse_event_name(&event, &group, buf,
803 /* Make a new event name */
805 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
806 is_return ? 'r' : 'p', symbol, offset);
808 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
809 is_return ? 'r' : 'p', addr);
810 sanitize_event_name(buf);
815 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
816 argc - 2, is_return);
819 /* This must return -ENOMEM, else there is a bug */
820 WARN_ON_ONCE(ret != -ENOMEM);
821 goto out; /* We know tk is not allocated */
823 argc -= 2; argv += 2;
825 /* parse arguments */
826 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
827 tmp = kstrdup(argv[i], GFP_KERNEL);
833 trace_probe_log_set_index(i + 2);
834 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
837 goto error; /* This can be -ENOMEM */
840 ret = traceprobe_set_print_fmt(&tk->tp, is_return);
844 ret = register_trace_kprobe(tk);
846 trace_probe_log_set_index(1);
848 trace_probe_log_err(0, BAD_INSN_BNDRY);
849 else if (ret == -ENOENT)
850 trace_probe_log_err(0, BAD_PROBE_ADDR);
851 else if (ret != -ENOMEM && ret != -EEXIST)
852 trace_probe_log_err(0, FAIL_REG_PROBE);
857 trace_probe_log_clear();
864 free_trace_kprobe(tk);
868 static int create_or_delete_trace_kprobe(int argc, char **argv)
872 if (argv[0][0] == '-')
873 return dyn_event_release(argc, argv, &trace_kprobe_ops);
875 ret = trace_kprobe_create(argc, (const char **)argv);
876 return ret == -ECANCELED ? -EINVAL : ret;
879 static int trace_kprobe_release(struct dyn_event *ev)
881 struct trace_kprobe *tk = to_trace_kprobe(ev);
882 int ret = unregister_trace_kprobe(tk);
885 free_trace_kprobe(tk);
889 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
891 struct trace_kprobe *tk = to_trace_kprobe(ev);
894 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
895 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
896 trace_probe_name(&tk->tp));
899 seq_printf(m, " 0x%p", tk->rp.kp.addr);
900 else if (tk->rp.kp.offset)
901 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
904 seq_printf(m, " %s", trace_kprobe_symbol(tk));
906 for (i = 0; i < tk->tp.nr_args; i++)
907 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
913 static int probes_seq_show(struct seq_file *m, void *v)
915 struct dyn_event *ev = v;
917 if (!is_trace_kprobe(ev))
920 return trace_kprobe_show(m, ev);
923 static const struct seq_operations probes_seq_op = {
924 .start = dyn_event_seq_start,
925 .next = dyn_event_seq_next,
926 .stop = dyn_event_seq_stop,
927 .show = probes_seq_show
930 static int probes_open(struct inode *inode, struct file *file)
934 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
935 ret = dyn_events_release_all(&trace_kprobe_ops);
940 return seq_open(file, &probes_seq_op);
943 static ssize_t probes_write(struct file *file, const char __user *buffer,
944 size_t count, loff_t *ppos)
946 return trace_parse_run_command(file, buffer, count, ppos,
947 create_or_delete_trace_kprobe);
950 static const struct file_operations kprobe_events_ops = {
951 .owner = THIS_MODULE,
955 .release = seq_release,
956 .write = probes_write,
959 /* Probes profiling interfaces */
960 static int probes_profile_seq_show(struct seq_file *m, void *v)
962 struct dyn_event *ev = v;
963 struct trace_kprobe *tk;
965 if (!is_trace_kprobe(ev))
968 tk = to_trace_kprobe(ev);
969 seq_printf(m, " %-44s %15lu %15lu\n",
970 trace_probe_name(&tk->tp),
971 trace_kprobe_nhit(tk),
977 static const struct seq_operations profile_seq_op = {
978 .start = dyn_event_seq_start,
979 .next = dyn_event_seq_next,
980 .stop = dyn_event_seq_stop,
981 .show = probes_profile_seq_show
984 static int profile_open(struct inode *inode, struct file *file)
986 return seq_open(file, &profile_seq_op);
989 static const struct file_operations kprobe_profile_ops = {
990 .owner = THIS_MODULE,
991 .open = profile_open,
994 .release = seq_release,
997 /* Kprobe specific fetch functions */
/* Return the length of the string, including the terminating null byte */
1000 static nokprobe_inline int
1001 fetch_store_strlen(unsigned long addr)
1007 ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
1009 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1011 return (ret < 0) ? ret : len;
/* Return the length of the string, including the terminating null byte */
1015 static nokprobe_inline int
1016 fetch_store_strlen_user(unsigned long addr)
1018 const void __user *uaddr = (__force const void __user *)addr;
1020 return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
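/*
 * For illustration (assumed layout, mirroring make_data_loc()/get_loc_len()
 * in trace_probe.h): the u32 at 'dest' packs the maximum length in its upper
 * 16 bits and the offset of the string data relative to 'base' in its lower
 * 16 bits.
 */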
1027 static nokprobe_inline int
1028 fetch_store_string(unsigned long addr, void *dest, void *base)
1030 int maxlen = get_loc_len(*(u32 *)dest);
1034 if (unlikely(!maxlen))
1037 __dest = get_loc_data(dest, base);
	/*
	 * Try to get the string again, since the string can be changed
	 * while probing.
	 */
1043 ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
1045 *(u32 *)dest = make_data_loc(ret, __dest - base);
1051 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1052 * with max length and relative data location.
1054 static nokprobe_inline int
1055 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1057 const void __user *uaddr = (__force const void __user *)addr;
1058 int maxlen = get_loc_len(*(u32 *)dest);
1062 if (unlikely(!maxlen))
1065 __dest = get_loc_data(dest, base);
1067 ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
1069 *(u32 *)dest = make_data_loc(ret, __dest - base);
1074 static nokprobe_inline int
1075 probe_mem_read(void *dest, void *src, size_t size)
1077 return probe_kernel_read(dest, src, size);
1080 static nokprobe_inline int
1081 probe_mem_read_user(void *dest, void *src, size_t size)
1083 const void __user *uaddr = (__force const void __user *)src;
1085 return probe_user_read(dest, uaddr, size);
1088 /* Note that we don't verify it, since the code does not come from user space */
1090 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1096 /* 1st stage: get value from context */
1099 val = regs_get_register(regs, code->param);
1101 case FETCH_OP_STACK:
1102 val = regs_get_kernel_stack_nth(regs, code->param);
1104 case FETCH_OP_STACKP:
1105 val = kernel_stack_pointer(regs);
1107 case FETCH_OP_RETVAL:
1108 val = regs_return_value(regs);
1111 val = code->immediate;
1114 val = (unsigned long)current->comm;
1117 val = (unsigned long)code->data;
1119 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1121 val = regs_get_kernel_argument(regs, code->param);
	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
1132 return process_fetch_insn_bottom(code, val, dest, base);
1134 NOKPROBE_SYMBOL(process_fetch_insn)
1136 /* Kprobe handler */
1137 static nokprobe_inline void
1138 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1139 struct trace_event_file *trace_file)
1141 struct kprobe_trace_entry_head *entry;
1142 struct ring_buffer_event *event;
1143 struct ring_buffer *buffer;
1144 int size, dsize, pc;
1145 unsigned long irq_flags;
1146 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1148 WARN_ON(call != trace_file->event_call);
1150 if (trace_trigger_soft_disabled(trace_file))
1153 local_save_flags(irq_flags);
1154 pc = preempt_count();
1156 dsize = __get_data_size(&tk->tp, regs);
1157 size = sizeof(*entry) + tk->tp.size + dsize;
1159 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1161 size, irq_flags, pc);
1165 entry = ring_buffer_event_data(event);
1166 entry->ip = (unsigned long)tk->rp.kp.addr;
1167 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1169 event_trigger_unlock_commit_regs(trace_file, buffer, event,
1170 entry, irq_flags, pc, regs);
1174 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1176 struct event_file_link *link;
1178 trace_probe_for_each_link_rcu(link, &tk->tp)
1179 __kprobe_trace_func(tk, regs, link->file);
1181 NOKPROBE_SYMBOL(kprobe_trace_func);
1183 /* Kretprobe handler */
1184 static nokprobe_inline void
1185 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1186 struct pt_regs *regs,
1187 struct trace_event_file *trace_file)
1189 struct kretprobe_trace_entry_head *entry;
1190 struct ring_buffer_event *event;
1191 struct ring_buffer *buffer;
1192 int size, pc, dsize;
1193 unsigned long irq_flags;
1194 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1196 WARN_ON(call != trace_file->event_call);
1198 if (trace_trigger_soft_disabled(trace_file))
1201 local_save_flags(irq_flags);
1202 pc = preempt_count();
1204 dsize = __get_data_size(&tk->tp, regs);
1205 size = sizeof(*entry) + tk->tp.size + dsize;
1207 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1209 size, irq_flags, pc);
1213 entry = ring_buffer_event_data(event);
1214 entry->func = (unsigned long)tk->rp.kp.addr;
1215 entry->ret_ip = (unsigned long)ri->ret_addr;
1216 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1218 event_trigger_unlock_commit_regs(trace_file, buffer, event,
1219 entry, irq_flags, pc, regs);
1223 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1224 struct pt_regs *regs)
1226 struct event_file_link *link;
1228 trace_probe_for_each_link_rcu(link, &tk->tp)
1229 __kretprobe_trace_func(tk, ri, regs, link->file);
1231 NOKPROBE_SYMBOL(kretprobe_trace_func);
1233 /* Event entry printers */
1234 static enum print_line_t
1235 print_kprobe_event(struct trace_iterator *iter, int flags,
1236 struct trace_event *event)
1238 struct kprobe_trace_entry_head *field;
1239 struct trace_seq *s = &iter->seq;
1240 struct trace_probe *tp;
1242 field = (struct kprobe_trace_entry_head *)iter->ent;
1243 tp = trace_probe_primary_from_call(
1244 container_of(event, struct trace_event_call, event));
1245 if (WARN_ON_ONCE(!tp))
1248 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1250 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1253 trace_seq_putc(s, ')');
1255 if (print_probe_args(s, tp->args, tp->nr_args,
1256 (u8 *)&field[1], field) < 0)
1259 trace_seq_putc(s, '\n');
1261 return trace_handle_return(s);
1264 static enum print_line_t
1265 print_kretprobe_event(struct trace_iterator *iter, int flags,
1266 struct trace_event *event)
1268 struct kretprobe_trace_entry_head *field;
1269 struct trace_seq *s = &iter->seq;
1270 struct trace_probe *tp;
1272 field = (struct kretprobe_trace_entry_head *)iter->ent;
1273 tp = trace_probe_primary_from_call(
1274 container_of(event, struct trace_event_call, event));
1275 if (WARN_ON_ONCE(!tp))
1278 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1280 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1283 trace_seq_puts(s, " <- ");
1285 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1288 trace_seq_putc(s, ')');
1290 if (print_probe_args(s, tp->args, tp->nr_args,
1291 (u8 *)&field[1], field) < 0)
1294 trace_seq_putc(s, '\n');
1297 return trace_handle_return(s);
1301 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1304 struct kprobe_trace_entry_head field;
1305 struct trace_probe *tp;
1307 tp = trace_probe_primary_from_call(event_call);
1308 if (WARN_ON_ONCE(!tp))
1311 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1313 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1316 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1319 struct kretprobe_trace_entry_head field;
1320 struct trace_probe *tp;
1322 tp = trace_probe_primary_from_call(event_call);
1323 if (WARN_ON_ONCE(!tp))
1326 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1327 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1329 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1332 #ifdef CONFIG_PERF_EVENTS
1334 /* Kprobe profile handler */
1336 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1338 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1339 struct kprobe_trace_entry_head *entry;
1340 struct hlist_head *head;
1341 int size, __size, dsize;
1344 if (bpf_prog_array_valid(call)) {
1345 unsigned long orig_ip = instruction_pointer(regs);
1348 ret = trace_call_bpf(call, regs);
		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
1361 head = this_cpu_ptr(call->perf_events);
1362 if (hlist_empty(head))
1365 dsize = __get_data_size(&tk->tp, regs);
1366 __size = sizeof(*entry) + tk->tp.size + dsize;
1367 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1368 size -= sizeof(u32);
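	/*
	 * Assumed rationale for the size calculation above: perf prefixes the
	 * raw sample with a u32 size field, so (__size + sizeof(u32)) is
	 * aligned to u64 and the u32 is then subtracted again, keeping the
	 * whole record u64-aligned.
	 */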
1370 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1374 entry->ip = (unsigned long)tk->rp.kp.addr;
1375 memset(&entry[1], 0, dsize);
1376 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1377 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1381 NOKPROBE_SYMBOL(kprobe_perf_func);
1383 /* Kretprobe profile handler */
1385 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1386 struct pt_regs *regs)
1388 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1389 struct kretprobe_trace_entry_head *entry;
1390 struct hlist_head *head;
1391 int size, __size, dsize;
1394 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1397 head = this_cpu_ptr(call->perf_events);
1398 if (hlist_empty(head))
1401 dsize = __get_data_size(&tk->tp, regs);
1402 __size = sizeof(*entry) + tk->tp.size + dsize;
1403 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1404 size -= sizeof(u32);
1406 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1410 entry->func = (unsigned long)tk->rp.kp.addr;
1411 entry->ret_ip = (unsigned long)ri->ret_addr;
1412 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1413 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1416 NOKPROBE_SYMBOL(kretprobe_perf_func);
1418 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1419 const char **symbol, u64 *probe_offset,
1420 u64 *probe_addr, bool perf_type_tracepoint)
1422 const char *pevent = trace_event_name(event->tp_event);
1423 const char *group = event->tp_event->class->system;
1424 struct trace_kprobe *tk;
1426 if (perf_type_tracepoint)
1427 tk = find_trace_kprobe(pevent, group);
1429 tk = event->tp_event->data;
1433 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1434 : BPF_FD_TYPE_KPROBE;
1436 *symbol = tk->symbol;
1437 *probe_offset = tk->rp.kp.offset;
1442 *probe_addr = (unsigned long)tk->rp.kp.addr;
1446 #endif /* CONFIG_PERF_EVENTS */
/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() calls enable_trace_kprobe() and
 * disable_trace_kprobe() locklessly, but it cannot race with this
 * __init function.
 */
1454 static int kprobe_register(struct trace_event_call *event,
1455 enum trace_reg type, void *data)
1457 struct trace_event_file *file = data;
1460 case TRACE_REG_REGISTER:
1461 return enable_trace_kprobe(event, file);
1462 case TRACE_REG_UNREGISTER:
1463 return disable_trace_kprobe(event, file);
1465 #ifdef CONFIG_PERF_EVENTS
1466 case TRACE_REG_PERF_REGISTER:
1467 return enable_trace_kprobe(event, NULL);
1468 case TRACE_REG_PERF_UNREGISTER:
1469 return disable_trace_kprobe(event, NULL);
1470 case TRACE_REG_PERF_OPEN:
1471 case TRACE_REG_PERF_CLOSE:
1472 case TRACE_REG_PERF_ADD:
1473 case TRACE_REG_PERF_DEL:
1480 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1482 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1485 raw_cpu_inc(*tk->nhit);
1487 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1488 kprobe_trace_func(tk, regs);
1489 #ifdef CONFIG_PERF_EVENTS
1490 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1491 ret = kprobe_perf_func(tk, regs);
1495 NOKPROBE_SYMBOL(kprobe_dispatcher);
1498 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1500 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1502 raw_cpu_inc(*tk->nhit);
1504 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1505 kretprobe_trace_func(tk, ri, regs);
1506 #ifdef CONFIG_PERF_EVENTS
1507 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1508 kretprobe_perf_func(tk, ri, regs);
	return 0;	/* We don't tweak the kernel, so just return 0 */
1512 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1514 static struct trace_event_functions kretprobe_funcs = {
1515 .trace = print_kretprobe_event
1518 static struct trace_event_functions kprobe_funcs = {
1519 .trace = print_kprobe_event
1522 static inline void init_trace_event_call(struct trace_kprobe *tk)
1524 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1526 if (trace_kprobe_is_return(tk)) {
1527 call->event.funcs = &kretprobe_funcs;
1528 call->class->define_fields = kretprobe_event_define_fields;
1530 call->event.funcs = &kprobe_funcs;
1531 call->class->define_fields = kprobe_event_define_fields;
1534 call->flags = TRACE_EVENT_FL_KPROBE;
1535 call->class->reg = kprobe_register;
1538 static int register_kprobe_event(struct trace_kprobe *tk)
1540 init_trace_event_call(tk);
1542 return trace_probe_register_event_call(&tk->tp);
1545 static int unregister_kprobe_event(struct trace_kprobe *tk)
1547 return trace_probe_unregister_event_call(&tk->tp);
1550 #ifdef CONFIG_PERF_EVENTS
1551 /* create a trace_kprobe, but don't add it to global lists */
1552 struct trace_event_call *
1553 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1556 struct trace_kprobe *tk;
	/*
	 * Local trace_kprobes are not added to dyn_event, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern
	 * about duplicated names here.
	 */
1565 event = func ? func : "DUMMY_EVENT";
1567 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1568 offs, 0 /* maxactive */, 0 /* nargs */,
1572 pr_info("Failed to allocate trace_probe.(%d)\n",
1574 return ERR_CAST(tk);
1577 init_trace_event_call(tk);
1579 if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1584 ret = __register_trace_kprobe(tk);
1588 return trace_probe_event_call(&tk->tp);
1590 free_trace_kprobe(tk);
1591 return ERR_PTR(ret);
1594 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1596 struct trace_kprobe *tk;
1598 tk = trace_kprobe_primary_from_call(event_call);
1602 if (trace_probe_is_enabled(&tk->tp)) {
1607 __unregister_trace_kprobe(tk);
1609 free_trace_kprobe(tk);
1611 #endif /* CONFIG_PERF_EVENTS */
1613 static __init void enable_boot_kprobe_events(void)
1615 struct trace_array *tr = top_trace_array();
1616 struct trace_event_file *file;
1617 struct trace_kprobe *tk;
1618 struct dyn_event *pos;
1620 mutex_lock(&event_mutex);
1621 for_each_trace_kprobe(tk, pos) {
1622 list_for_each_entry(file, &tr->events, list)
1623 if (file->event_call == trace_probe_event_call(&tk->tp))
1624 trace_event_enable_disable(file, 1, 0);
1626 mutex_unlock(&event_mutex);
1629 static __init void setup_boot_kprobe_events(void)
1631 char *p, *cmd = kprobe_boot_events_buf;
1634 strreplace(kprobe_boot_events_buf, ',', ' ');
1636 while (cmd && *cmd != '\0') {
1637 p = strchr(cmd, ';');
1641 ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1643 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1645 kprobe_boot_events_enabled = true;
1650 enable_boot_kprobe_events();
1653 /* Make a tracefs interface for controlling probe points */
1654 static __init int init_kprobe_trace(void)
1656 struct dentry *d_tracer;
1657 struct dentry *entry;
1660 ret = dyn_event_register(&trace_kprobe_ops);
1664 if (register_module_notifier(&trace_kprobe_module_nb))
1667 d_tracer = tracing_init_dentry();
1668 if (IS_ERR(d_tracer))
1671 entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1672 NULL, &kprobe_events_ops);
1674 /* Event list interface */
1676 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1678 /* Profile interface */
1679 entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1680 NULL, &kprobe_profile_ops);
1683 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1685 setup_boot_kprobe_events();
1689 fs_initcall(init_kprobe_trace);
1692 #ifdef CONFIG_FTRACE_STARTUP_TEST
1693 static __init struct trace_event_file *
1694 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1696 struct trace_event_file *file;
1698 list_for_each_entry(file, &tr->events, list)
1699 if (file->event_call == trace_probe_event_call(&tk->tp))
/*
 * Nobody but us can call enable_trace_kprobe()/disable_trace_kprobe() at
 * this stage, so we can do this locklessly.
 */
1709 static __init int kprobe_trace_self_tests_init(void)
1712 int (*target)(int, int, int, int, int, int);
1713 struct trace_kprobe *tk;
1714 struct trace_event_file *file;
1716 if (tracing_is_disabled())
1719 if (kprobe_boot_events_enabled) {
1720 pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1724 target = kprobe_trace_selftest_target;
1726 pr_info("Testing kprobe tracing: ");
1728 ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1729 create_or_delete_trace_kprobe);
1730 if (WARN_ON_ONCE(ret)) {
1731 pr_warn("error on probing function entry.\n");
1734 /* Enable trace point */
1735 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1736 if (WARN_ON_ONCE(tk == NULL)) {
1737 pr_warn("error on getting new probe.\n");
1740 file = find_trace_probe_file(tk, top_trace_array());
1741 if (WARN_ON_ONCE(file == NULL)) {
1742 pr_warn("error on getting probe file.\n");
1745 enable_trace_kprobe(
1746 trace_probe_event_call(&tk->tp), file);
1750 ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1751 create_or_delete_trace_kprobe);
1752 if (WARN_ON_ONCE(ret)) {
1753 pr_warn("error on probing function return.\n");
1756 /* Enable trace point */
1757 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1758 if (WARN_ON_ONCE(tk == NULL)) {
1759 pr_warn("error on getting 2nd new probe.\n");
1762 file = find_trace_probe_file(tk, top_trace_array());
1763 if (WARN_ON_ONCE(file == NULL)) {
1764 pr_warn("error on getting probe file.\n");
1767 enable_trace_kprobe(
1768 trace_probe_event_call(&tk->tp), file);
1775 ret = target(1, 2, 3, 4, 5, 6);
1778 * Not expecting an error here, the check is only to prevent the
1779 * optimizer from removing the call to target() as otherwise there
1780 * are no side-effects and the call is never performed.
	/* Disable trace points before removing them */
1786 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1787 if (WARN_ON_ONCE(tk == NULL)) {
1788 pr_warn("error on getting test probe.\n");
1791 if (trace_kprobe_nhit(tk) != 1) {
1792 pr_warn("incorrect number of testprobe hits\n");
1796 file = find_trace_probe_file(tk, top_trace_array());
1797 if (WARN_ON_ONCE(file == NULL)) {
1798 pr_warn("error on getting probe file.\n");
1801 disable_trace_kprobe(
1802 trace_probe_event_call(&tk->tp), file);
1805 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1806 if (WARN_ON_ONCE(tk == NULL)) {
1807 pr_warn("error on getting 2nd test probe.\n");
1810 if (trace_kprobe_nhit(tk) != 1) {
1811 pr_warn("incorrect number of testprobe2 hits\n");
1815 file = find_trace_probe_file(tk, top_trace_array());
1816 if (WARN_ON_ONCE(file == NULL)) {
1817 pr_warn("error on getting probe file.\n");
1820 disable_trace_kprobe(
1821 trace_probe_event_call(&tk->tp), file);
1824 ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
1825 if (WARN_ON_ONCE(ret)) {
1826 pr_warn("error on deleting a probe.\n");
1830 ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
1831 if (WARN_ON_ONCE(ret)) {
1832 pr_warn("error on deleting a probe.\n");
1837 ret = dyn_events_release_all(&trace_kprobe_ops);
1838 if (WARN_ON_ONCE(ret)) {
1839 pr_warn("error on cleaning up probes.\n");
1843 * Wait for the optimizer work to finish. Otherwise it might fiddle
1844 * with probes in already freed __init text.
1846 wait_for_kprobe_optimizer();
	pr_cont("NG: Some tests failed. Please check them.\n");
1854 late_initcall(kprobe_trace_self_tests_init);