// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	disable_tracing_selftest("running kprobe events");

	return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
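/*
 * For example (illustrative, following the kernel-parameters documentation;
 * the probed symbol is arbitrary): probe definitions given on the command
 * line are semicolon-delimited and use commas in place of spaces, e.g.
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2
 *
 * setup_boot_kprobe_events() below converts the commas back to spaces
 * before feeding each definition to the regular command parser.
 */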
static int trace_kprobe_create(int argc, const char **argv);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};
/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(module_name(mod));
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}
static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	mutex_lock(&module_mutex);
	ret = !!find_module(tk->symbol);
	mutex_unlock(&module_mutex);
	*p = ':';

	return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	if (!tk->symbol)
		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		snprintf(buf, sizeof(buf), "%s+%u",
			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
	else
		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tk->tp, argc, argv);
}
static bool trace_kprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
	    trace_kprobe_match_command_head(tk, argc, argv);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
	return !(list_empty(&tk->rp.kp.list) &&
		 hlist_unhashed(&tk->rp.kp.hlist));
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}
static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_kprobe, tp);
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
	       false;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	if (tk) {
		trace_probe_cleanup(&tk->tp);
		kfree(tk->symbol);
		free_percpu(tk->nhit);
		kfree(tk);
	}
}
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;
	INIT_HLIST_NODE(&tk->rp.kp.hlist);
	INIT_LIST_HEAD(&tk->rp.kp.list);

	ret = trace_probe_init(&tk->tp, event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	return tk;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}
static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

static void __disable_trace_kprobe(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_kprobe *tk;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tk = container_of(pos, struct trace_kprobe, tp);
		if (!trace_kprobe_is_registered(tk))
			continue;
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
	}
}
/*
 * Enable trace_probe
 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tk = container_of(pos, struct trace_kprobe, tp);
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_kprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in below function. For perf event,
		 * file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to ensure synchronize
		 * event. We don't need to care about it.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}
#if defined(CONFIG_DYNAMIC_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
	unsigned long offset, size;

	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}

static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
		return false;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return true;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_notrace_func(addr);
	}

	return true;
}
#else
#define within_notrace_func(tk)	(false)
#endif
/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}
/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* If other probes are on the event, just unregister kprobe */
	if (trace_probe_has_sibling(&tk->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

unreg:
	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);
	trace_probe_unlink(&tk->tp);

	return 0;
}

static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
					 struct trace_kprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_kprobe, tp);
		if (strcmp(trace_kprobe_symbol(orig),
			   trace_kprobe_symbol(comp)) ||
		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that argument starts index = 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent);

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&event_mutex);

	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
				   trace_probe_group_name(&tk->tp));
	if (old_tk) {
		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_kprobe(tk, old_tk);
		}
		goto end;
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent);

end:
	mutex_unlock(&event_mutex);
	return ret;
}
/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_probe_name(&tk->tp),
					module_name(mod), ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}
static int trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 *    Or
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
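	/*
	 * For example, writes like the following to the tracefs
	 * "kprobe_events" file end up in this parser (paths and names are
	 * illustrative; tracefs is assumed mounted at /sys/kernel/tracing):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 */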
	struct trace_kprobe *tk = NULL;
	int i, len, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, MAXACT_NO_KPROBE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* kretprobes instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}
	/* try to parse an address. if that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether uprobe event specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			return -ENOMEM;

		tmp = strchr(symbol, '%');
		if (tmp) {
			if (!strcmp(tmp, "%return")) {
				*tmp = '\0';
				is_return = true;
			} else {
				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
				goto parse_error;
			}
		}

		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (is_return)
			flags |= TPARG_FL_RETURN;
		ret = kprobe_on_func_entry(NULL, symbol, offset);
		if (ret == 0)
			flags |= TPARG_FL_FENTRY;
		/* Defer the ENOENT case until register kprobe */
		if (ret == -EINVAL && is_return) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc - 2, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}
	argc -= 2; argv += 2;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
		kfree(tmp);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}
	ret = traceprobe_set_print_fmt(&tk->tp, is_return);
	if (ret < 0)
		goto error;

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	trace_probe_log_clear();
	kfree(symbol);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_kprobe(tk);
	goto out;
}
static int create_or_delete_trace_kprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_kprobe_ops);

	ret = trace_kprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
	return trace_run_command(cmd->seq.buffer, create_or_delete_trace_kprobe);
}

/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object. Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @kretprobe: Is this a return probe?
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 * function returns -EINVAL if @loc == NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
				 const char *name, const char *loc, ...)
{
	char buf[MAX_EVENT_NAME_LEN];
	struct dynevent_arg arg;
	va_list args;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	if (!loc)
		return -EINVAL;

	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

	ret = dynevent_str_add(cmd, buf);
	if (ret)
		return ret;

	dynevent_arg_init(&arg, 0);
	arg.str = loc;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, loc);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
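/*
 * A minimal usage sketch for the command-generation API above, modeled on
 * the in-tree kprobe_event_gen_test module; the event name, probe location
 * and fetch args are illustrative only:
 *
 *	static char buf[MAX_DYNEVENT_CMD_LEN];
 *	struct dynevent_cmd cmd;
 *	int ret;
 *
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "flags=%cx",
 *					      "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */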
/**
 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_add_fields() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Add probe fields to an existing kprobe command using a variable
 * list of args.  Fields are added in the same order they're listed.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret = 0;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);

	va_start(args, cmd);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
	char buf[MAX_EVENT_NAME_LEN];

	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

	return trace_run_command(buf, create_or_delete_trace_kprobe);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
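/*
 * For example, a module that generated a "gen_kprobe_test" event with the
 * API above could remove it again with (name illustrative):
 *
 *	ret = kprobe_event_delete("gen_kprobe_test");
 */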
static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret = unregister_trace_kprobe(tk);

	if (!ret)
		free_trace_kprobe(tk);
	return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
				trace_probe_name(&tk->tp));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_kprobe(ev))
		return 0;

	return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_kprobe_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;

	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_probe_name(&tk->tp),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}
static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
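/*
 * Reading "kprobe_profile" yields one line per probe in the format
 * produced by probes_profile_seq_show() above: event name, hit count,
 * then missed-execution count. An illustrative line:
 *
 *   myprobe                                             5               0
 */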
/* Kprobe specific fetch functions */

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	const void __user *uaddr =  (__force const void __user *)addr;

	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if (addr < TASK_SIZE)
		return fetch_store_strlen_user(addr);
#endif

	do {
		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}
/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr =  (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Try to get the string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	const void __user *uaddr =  (__force const void __user *)src;

	return copy_from_user_nofault(dest, uaddr, size);
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(fbuffer.flags);
	fbuffer.pc = preempt_count();
	fbuffer.trace_file = trace_file;

	dsize = __get_data_size(&tk->tp, regs);

	fbuffer.event =
		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
					call->event.type,
					sizeof(*entry) + tk->tp.size + dsize,
					fbuffer.flags, fbuffer.pc);
	if (!fbuffer.event)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);
/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct trace_event_buffer fbuffer;
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	int dsize;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(fbuffer.flags);
	fbuffer.pc = preempt_count();
	fbuffer.trace_file = trace_file;

	dsize = __get_data_size(&tk->tp, regs);

	fbuffer.event =
		trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
					call->event.type,
					sizeof(*entry) + tk->tp.size + dsize,
					fbuffer.flags, fbuffer.pc);
	if (!fbuffer.event)
		return;

	fbuffer.regs = regs;
	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	trace_probe_for_each_link_rcu(link, &tk->tp)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);
/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so return 1 so that we don't do the
		 * single stepping.
		 */
		if (orig_ip != instruction_pointer(regs))
			return 1;
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);
/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **symbol, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_kprobe *tk;

	if (perf_type_tracepoint)
		tk = find_trace_kprobe(pevent, group);
	else
		tk = trace_kprobe_primary_from_call(event->tp_event);
	if (!tk)
		return -EINVAL;

	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
					      : BPF_FD_TYPE_KPROBE;
	if (tk->symbol) {
		*symbol = tk->symbol;
		*probe_offset = tk->rp.kp.offset;
		*probe_addr = 0;
	} else {
		*symbol = NULL;
		*probe_offset = 0;
		*probe_addr = (unsigned long)tk->rp.kp.addr;
	}
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(event, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(event, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);
static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct kretprobe *rp = get_kretprobe(ri);
	struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static struct trace_event_fields kretprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kretprobe_event_define_fields },
	{}
};

static struct trace_event_fields kprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = kprobe_event_define_fields },
	{}
};
static inline void init_trace_event_call(struct trace_kprobe *tk)
{
	struct trace_event_call *call = trace_probe_event_call(&tk->tp);

	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->fields_array = kretprobe_fields_array;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->fields_array = kprobe_fields_array;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
	init_trace_event_call(tk);

	return trace_probe_register_event_call(&tk->tp);
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	return trace_probe_unregister_event_call(&tk->tp);
}
#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to dyn_event, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk);

	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		goto error;

	return trace_probe_event_call(&tk->tp);
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = trace_kprobe_primary_from_call(event_call);
	if (unlikely(!tk))
		return;

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);

	free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */
static __init void enable_boot_kprobe_events(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file;
	struct trace_kprobe *tk;
	struct dyn_event *pos;

	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		list_for_each_entry(file, &tr->events, list)
			if (file->event_call == trace_probe_event_call(&tk->tp))
				trace_event_enable_disable(file, 1, 0);
	}
	mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
{
	char *p, *cmd = kprobe_boot_events_buf;
	int ret;

	strreplace(kprobe_boot_events_buf, ',', ' ');

	while (cmd && *cmd != '\0') {
		p = strchr(cmd, ';');
		if (p)
			*p++ = '\0';

		ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
		if (ret)
			pr_warn("Failed to add event(%d): %s\n", ret, cmd);

		cmd = p;
	}

	enable_boot_kprobe_events();
}

/*
 * Register dynevent at core_initcall. This allows the kernel to setup kprobe
 * events in postcore_initcall without tracefs.
 */
static __init int init_kprobe_trace_early(void)
{
	int ret;

	ret = dyn_event_register(&trace_kprobe_ops);
	if (ret)
		return ret;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	return 0;
}
core_initcall(init_kprobe_trace_early);
/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	int ret;
	struct dentry *entry;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, NULL,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, NULL,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");

	setup_boot_kprobe_events();

	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == trace_probe_event_call(&tk->tp))
			return file;

	return NULL;
}
/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	if (tracing_selftest_disabled)
		return 0;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
				create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
				create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(
					trace_probe_event_call(&tk->tp), file);
		}
	}
	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing it */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(
				trace_probe_event_call(&tk->tp), file);
	}

	ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	ret = dyn_events_release_all(&trace_kprobe_ops);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on cleaning up probes.\n");
		warn++;
	}

	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");

	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif