1 // SPDX-License-Identifier: GPL-2.0
3 * Kprobes-based tracing events
5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
8 #define pr_fmt(fmt) "trace_kprobe: " fmt
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/rculist.h>
15 #include <linux/error-injection.h>
17 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
19 #include "trace_dynevent.h"
20 #include "trace_kprobe_selftest.h"
21 #include "trace_probe.h"
22 #include "trace_probe_tmpl.h"
24 #define KPROBE_EVENT_SYSTEM "kprobes"
25 #define KRETPROBE_MAXACTIVE_MAX 4096
27 /* Kprobe early definition from command line */
28 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
30 static int __init set_kprobe_boot_events(char *str)
32 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
33 disable_tracing_selftest("running kprobe events");
37 __setup("kprobe_event=", set_kprobe_boot_events);
39 static int trace_kprobe_create(const char *raw_command);
40 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
41 static int trace_kprobe_release(struct dyn_event *ev);
42 static bool trace_kprobe_is_busy(struct dyn_event *ev);
43 static bool trace_kprobe_match(const char *system, const char *event,
44 int argc, const char **argv, struct dyn_event *ev);
46 static struct dyn_event_operations trace_kprobe_ops = {
47 .create = trace_kprobe_create,
48 .show = trace_kprobe_show,
49 .is_busy = trace_kprobe_is_busy,
50 .free = trace_kprobe_release,
51 .match = trace_kprobe_match,
55 * Kprobe event core functions
58 struct dyn_event devent;
59 struct kretprobe rp; /* Use rp.kp for kprobe use */
60 unsigned long __percpu *nhit;
61 const char *symbol; /* symbol name */
62 struct trace_probe tp;
65 static bool is_trace_kprobe(struct dyn_event *ev)
67 return ev->ops == &trace_kprobe_ops;
70 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
72 return container_of(ev, struct trace_kprobe, devent);
76 * for_each_trace_kprobe - iterate over the trace_kprobe list
77 * @pos: the struct trace_kprobe * for each entry
78 * @dpos: the struct dyn_event * to use as a loop cursor
80 #define for_each_trace_kprobe(pos, dpos) \
81 for_each_dyn_event(dpos) \
82 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
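/*
 * A minimal sketch (not part of this file) of how the iterator above is
 * used: walk all dynamic events, skipping non-kprobe ones, the same way
 * find_trace_kprobe() below does. Callers are assumed to hold
 * event_mutex, as the existing users do.
 */
static int __maybe_unused count_enabled_trace_kprobes(void)
{
	struct dyn_event *dpos;
	struct trace_kprobe *tk;
	int cnt = 0;

	for_each_trace_kprobe(tk, dpos)
		if (trace_probe_is_enabled(&tk->tp))
			cnt++;

	return cnt;
}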
84 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
86 return tk->rp.handler != NULL;
89 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
91 return tk->symbol ? tk->symbol : "unknown";
94 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
96 return tk->rp.kp.offset;
99 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
101 return kprobe_gone(&tk->rp.kp);
104 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
107 int len = strlen(module_name(mod));
108 const char *name = trace_kprobe_symbol(tk);
110 return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
113 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
120 p = strchr(tk->symbol, ':');
124 rcu_read_lock_sched();
125 ret = !!find_module(tk->symbol);
126 rcu_read_unlock_sched();
132 static bool trace_kprobe_is_busy(struct dyn_event *ev)
134 struct trace_kprobe *tk = to_trace_kprobe(ev);
136 return trace_probe_is_enabled(&tk->tp);
139 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
140 int argc, const char **argv)
142 char buf[MAX_ARGSTR_LEN + 1];
148 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
149 else if (tk->rp.kp.offset)
150 snprintf(buf, sizeof(buf), "%s+%u",
151 trace_kprobe_symbol(tk), tk->rp.kp.offset);
153 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
154 if (strcmp(buf, argv[0]))
158 return trace_probe_match_command_args(&tk->tp, argc, argv);
161 static bool trace_kprobe_match(const char *system, const char *event,
162 int argc, const char **argv, struct dyn_event *ev)
164 struct trace_kprobe *tk = to_trace_kprobe(ev);
166 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
167 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
168 trace_kprobe_match_command_head(tk, argc, argv);
171 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
173 unsigned long nhit = 0;
176 for_each_possible_cpu(cpu)
177 nhit += *per_cpu_ptr(tk->nhit, cpu);
182 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
184 return !(list_empty(&tk->rp.kp.list) &&
185 hlist_unhashed(&tk->rp.kp.hlist));
188 /* Return 0 if it fails to find the symbol address */
189 static nokprobe_inline
190 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
195 addr = (unsigned long)
196 kallsyms_lookup_name(trace_kprobe_symbol(tk));
198 addr += tk->rp.kp.offset;
200 addr = (unsigned long)tk->rp.kp.addr;
205 static nokprobe_inline struct trace_kprobe *
206 trace_kprobe_primary_from_call(struct trace_event_call *call)
208 struct trace_probe *tp;
210 tp = trace_probe_primary_from_call(call);
211 if (WARN_ON_ONCE(!tp))
214 return container_of(tp, struct trace_kprobe, tp);
217 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
219 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
221 return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
222 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
223 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
226 bool trace_kprobe_error_injectable(struct trace_event_call *call)
228 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
230 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
234 static int register_kprobe_event(struct trace_kprobe *tk);
235 static int unregister_kprobe_event(struct trace_kprobe *tk);
237 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
238 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
239 struct pt_regs *regs);
241 static void free_trace_kprobe(struct trace_kprobe *tk)
244 trace_probe_cleanup(&tk->tp);
246 free_percpu(tk->nhit);
252 * Allocate new trace_probe and initialize it (including kprobes).
254 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
260 int nargs, bool is_return)
262 struct trace_kprobe *tk;
265 tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
269 tk->nhit = alloc_percpu(unsigned long);
274 tk->symbol = kstrdup(symbol, GFP_KERNEL);
277 tk->rp.kp.symbol_name = tk->symbol;
278 tk->rp.kp.offset = offs;
280 tk->rp.kp.addr = addr;
283 tk->rp.handler = kretprobe_dispatcher;
285 tk->rp.kp.pre_handler = kprobe_dispatcher;
287 tk->rp.maxactive = maxactive;
288 INIT_HLIST_NODE(&tk->rp.kp.hlist);
289 INIT_LIST_HEAD(&tk->rp.kp.list);
291 ret = trace_probe_init(&tk->tp, event, group, false);
295 dyn_event_init(&tk->devent, &trace_kprobe_ops);
298 free_trace_kprobe(tk);
302 static struct trace_kprobe *find_trace_kprobe(const char *event,
305 struct dyn_event *pos;
306 struct trace_kprobe *tk;
308 for_each_trace_kprobe(tk, pos)
309 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
310 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
315 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
319 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
320 if (trace_kprobe_is_return(tk))
321 ret = enable_kretprobe(&tk->rp);
323 ret = enable_kprobe(&tk->rp.kp);
329 static void __disable_trace_kprobe(struct trace_probe *tp)
331 struct trace_kprobe *tk;
333 list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
334 if (!trace_kprobe_is_registered(tk))
336 if (trace_kprobe_is_return(tk))
337 disable_kretprobe(&tk->rp);
339 disable_kprobe(&tk->rp.kp);
345  * if the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
347 static int enable_trace_kprobe(struct trace_event_call *call,
348 struct trace_event_file *file)
350 struct trace_probe *tp;
351 struct trace_kprobe *tk;
355 tp = trace_probe_primary_from_call(call);
356 if (WARN_ON_ONCE(!tp))
358 enabled = trace_probe_is_enabled(tp);
360 /* This also changes "enabled" state */
362 ret = trace_probe_add_file(tp, file);
366 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
371 list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
372 if (trace_kprobe_has_gone(tk))
374 ret = __enable_trace_kprobe(tk);
381 /* Failed to enable one of them. Roll back all */
383 __disable_trace_kprobe(tp);
385 trace_probe_remove_file(tp, file);
387 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
394 * Disable trace_probe
395  * if the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
397 static int disable_trace_kprobe(struct trace_event_call *call,
398 struct trace_event_file *file)
400 struct trace_probe *tp;
402 tp = trace_probe_primary_from_call(call);
403 if (WARN_ON_ONCE(!tp))
407 if (!trace_probe_get_file_link(tp, file))
409 if (!trace_probe_has_single_file(tp))
411 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
413 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
415 if (!trace_probe_is_enabled(tp))
416 __disable_trace_kprobe(tp);
421	 * Synchronization is done in the function below. For perf events,
422	 * file == NULL and perf_trace_event_unreg() calls
423	 * tracepoint_synchronize_unregister() to synchronize the event
424	 * unregistration. We don't need to care about it.
426 trace_probe_remove_file(tp, file);
431 #if defined(CONFIG_DYNAMIC_FTRACE) && \
432 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
433 static bool __within_notrace_func(unsigned long addr)
435 unsigned long offset, size;
437 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
440 /* Get the entry address of the target function */
444	 * Since ftrace_location_range() does an inclusive range check, we need
445 * to subtract 1 byte from the end address.
447 return !ftrace_location_range(addr, addr + size - 1);
450 static bool within_notrace_func(struct trace_kprobe *tk)
452 unsigned long addr = trace_kprobe_address(tk);
453 char symname[KSYM_NAME_LEN], *p;
455 if (!__within_notrace_func(addr))
458 /* Check if the address is on a suffixed-symbol */
459 if (!lookup_symbol_name(addr, symname)) {
460 p = strchr(symname, '.');
464 addr = (unsigned long)kprobe_lookup_name(symname, 0);
466 return __within_notrace_func(addr);
472 #define within_notrace_func(tk) (false)
475 /* Internal register function - just handle k*probes and flags */
476 static int __register_trace_kprobe(struct trace_kprobe *tk)
480 ret = security_locked_down(LOCKDOWN_KPROBES);
484 if (trace_kprobe_is_registered(tk))
487 if (within_notrace_func(tk)) {
488 pr_warn("Could not probe notrace function %s\n",
489 trace_kprobe_symbol(tk));
493 for (i = 0; i < tk->tp.nr_args; i++) {
494 ret = traceprobe_update_arg(&tk->tp.args[i]);
499	/* Set/clear disabled flag according to tp->flags */
500 if (trace_probe_is_enabled(&tk->tp))
501 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
503 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
505 if (trace_kprobe_is_return(tk))
506 ret = register_kretprobe(&tk->rp);
508 ret = register_kprobe(&tk->rp.kp);
513 /* Internal unregister function - just handle k*probes and flags */
514 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
516 if (trace_kprobe_is_registered(tk)) {
517 if (trace_kprobe_is_return(tk))
518 unregister_kretprobe(&tk->rp);
520 unregister_kprobe(&tk->rp.kp);
521 /* Cleanup kprobe for reuse and mark it unregistered */
522 INIT_HLIST_NODE(&tk->rp.kp.hlist);
523 INIT_LIST_HEAD(&tk->rp.kp.list);
524 if (tk->rp.kp.symbol_name)
525 tk->rp.kp.addr = NULL;
529 /* Unregister a trace_probe and probe_event */
530 static int unregister_trace_kprobe(struct trace_kprobe *tk)
532 /* If other probes are on the event, just unregister kprobe */
533 if (trace_probe_has_sibling(&tk->tp))
536 /* Enabled event can not be unregistered */
537 if (trace_probe_is_enabled(&tk->tp))
540 /* If there's a reference to the dynamic event */
541 if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
544 /* Will fail if probe is being used by ftrace or perf */
545 if (unregister_kprobe_event(tk))
549 __unregister_trace_kprobe(tk);
550 dyn_event_remove(&tk->devent);
551 trace_probe_unlink(&tk->tp);
556 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
557 struct trace_kprobe *comp)
559 struct trace_probe_event *tpe = orig->tp.event;
562 list_for_each_entry(orig, &tpe->probes, tp.list) {
563 if (strcmp(trace_kprobe_symbol(orig),
564 trace_kprobe_symbol(comp)) ||
565 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
569 * trace_probe_compare_arg_type() ensured that nr_args and
570		 * each argument name and type are the same. Let's compare comm.
572 for (i = 0; i < orig->tp.nr_args; i++) {
573 if (strcmp(orig->tp.args[i].comm,
574 comp->tp.args[i].comm))
578 if (i == orig->tp.nr_args)
585 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
589 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
591		/* Note that arguments start at index 2 */
592 trace_probe_log_set_index(ret + 1);
593 trace_probe_log_err(0, DIFF_ARG_TYPE);
596 if (trace_kprobe_has_same_kprobe(to, tk)) {
597 trace_probe_log_set_index(0);
598 trace_probe_log_err(0, SAME_PROBE);
602 /* Append to existing event */
603 ret = trace_probe_append(&tk->tp, &to->tp);
607 /* Register k*probe */
608 ret = __register_trace_kprobe(tk);
609 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
610 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
615 trace_probe_unlink(&tk->tp);
617 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
622 /* Register a trace_probe and probe_event */
623 static int register_trace_kprobe(struct trace_kprobe *tk)
625 struct trace_kprobe *old_tk;
628 mutex_lock(&event_mutex);
630 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
631 trace_probe_group_name(&tk->tp));
633 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
634 trace_probe_log_set_index(0);
635 trace_probe_log_err(0, DIFF_PROBE_TYPE);
638 ret = append_trace_kprobe(tk, old_tk);
643 /* Register new event */
644 ret = register_kprobe_event(tk);
646 if (ret == -EEXIST) {
647 trace_probe_log_set_index(0);
648 trace_probe_log_err(0, EVENT_EXIST);
650 pr_warn("Failed to register probe event(%d)\n", ret);
654 /* Register k*probe */
655 ret = __register_trace_kprobe(tk);
656 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
657 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
662 unregister_kprobe_event(tk);
664 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
667 mutex_unlock(&event_mutex);
671 /* Module notifier callback, checking events on the module */
672 static int trace_kprobe_module_callback(struct notifier_block *nb,
673 unsigned long val, void *data)
675 struct module *mod = data;
676 struct dyn_event *pos;
677 struct trace_kprobe *tk;
680 if (val != MODULE_STATE_COMING)
683 /* Update probes on coming module */
684 mutex_lock(&event_mutex);
685 for_each_trace_kprobe(tk, pos) {
686 if (trace_kprobe_within_module(tk, mod)) {
687			/* No need to check busy - this probe should already have been marked gone. */
688 __unregister_trace_kprobe(tk);
689 ret = __register_trace_kprobe(tk);
691 pr_warn("Failed to re-register probe %s on %s: %d\n",
692 trace_probe_name(&tk->tp),
693 module_name(mod), ret);
696 mutex_unlock(&event_mutex);
701 static struct notifier_block trace_kprobe_module_nb = {
702 .notifier_call = trace_kprobe_module_callback,
703 .priority = 1 /* Invoked after kprobe module callback */
706 static int __trace_kprobe_create(int argc, const char *argv[])
711 * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
713 * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
715  *  p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
718 * $retval : fetch return value
719 * $stack : fetch stack address
720  *  $stackN	: fetch Nth entry of stack (N:0-)
721 * $comm : fetch current task comm
722 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
723 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
724 * %REG : fetch register REG
725 * Dereferencing memory fetch:
726 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
727 * Alias name of args:
728 * NAME=FETCHARG : set NAME as alias of FETCHARG.
730 * FETCHARG:TYPE : use TYPE instead of unsigned long.
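 *
 * Examples (illustrative only; the probed symbol and the register names
 * are assumptions, not taken from this file):
 *   p:myprobe do_sys_open dfd=%di path=+0(%si):string
 *   r100:myretprobe do_sys_open $retval
 *   p:myprobe2 do_sys_open%return $retval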
732 struct trace_kprobe *tk = NULL;
734 bool is_return = false;
735 char *symbol = NULL, *tmp = NULL;
736 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
737 enum probe_print_type ptype;
741 char buf[MAX_EVENT_NAME_LEN];
742 unsigned int flags = TPARG_FL_KERNEL;
744 switch (argv[0][0]) {
756 trace_probe_log_init("trace_kprobe", argc, argv);
758 event = strchr(&argv[0][1], ':');
762 if (isdigit(argv[0][1])) {
764 trace_probe_log_err(1, MAXACT_NO_KPROBE);
768 len = event - &argv[0][1] - 1;
770 len = strlen(&argv[0][1]);
771 if (len > MAX_EVENT_NAME_LEN - 1) {
772 trace_probe_log_err(1, BAD_MAXACT);
775 memcpy(buf, &argv[0][1], len);
777 ret = kstrtouint(buf, 0, &maxactive);
778 if (ret || !maxactive) {
779 trace_probe_log_err(1, BAD_MAXACT);
782		/* kretprobe instances are iterated over via a list. The
783 * maximum should stay reasonable.
785 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
786 trace_probe_log_err(1, MAXACT_TOO_BIG);
791	/* Try to parse an address. If that fails, try to read the
792 * input as a symbol. */
793 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
794 trace_probe_log_set_index(1);
795		/* Check whether a uprobe event is specified */
796 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
800 /* a symbol specified */
801 symbol = kstrdup(argv[1], GFP_KERNEL);
805 tmp = strchr(symbol, '%');
807 if (!strcmp(tmp, "%return")) {
811 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
816 /* TODO: support .init module functions */
817 ret = traceprobe_split_symbol_offset(symbol, &offset);
818 if (ret || offset < 0 || offset > UINT_MAX) {
819 trace_probe_log_err(0, BAD_PROBE_ADDR);
823 flags |= TPARG_FL_RETURN;
824 ret = kprobe_on_func_entry(NULL, symbol, offset);
826 flags |= TPARG_FL_FENTRY;
827		/* Defer the ENOENT case until the kprobe is registered */
828 if (ret == -EINVAL && is_return) {
829 trace_probe_log_err(0, BAD_RETPROBE);
834 trace_probe_log_set_index(0);
836 ret = traceprobe_parse_event_name(&event, &group, buf,
841 /* Make a new event name */
843 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
844 is_return ? 'r' : 'p', symbol, offset);
846 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
847 is_return ? 'r' : 'p', addr);
848 sanitize_event_name(buf);
853 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
854 argc - 2, is_return);
857 /* This must return -ENOMEM, else there is a bug */
858 WARN_ON_ONCE(ret != -ENOMEM);
859 goto out; /* We know tk is not allocated */
861 argc -= 2; argv += 2;
863 /* parse arguments */
864 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
865 trace_probe_log_set_index(i + 2);
866 ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
868 goto error; /* This can be -ENOMEM */
871 ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
872 ret = traceprobe_set_print_fmt(&tk->tp, ptype);
876 ret = register_trace_kprobe(tk);
878 trace_probe_log_set_index(1);
880 trace_probe_log_err(0, BAD_INSN_BNDRY);
881 else if (ret == -ENOENT)
882 trace_probe_log_err(0, BAD_PROBE_ADDR);
883 else if (ret != -ENOMEM && ret != -EEXIST)
884 trace_probe_log_err(0, FAIL_REG_PROBE);
889 trace_probe_log_clear();
896 free_trace_kprobe(tk);
900 static int trace_kprobe_create(const char *raw_command)
902 return trace_probe_create(raw_command, __trace_kprobe_create);
905 static int create_or_delete_trace_kprobe(const char *raw_command)
909 if (raw_command[0] == '-')
910 return dyn_event_release(raw_command, &trace_kprobe_ops);
912 ret = trace_kprobe_create(raw_command);
913 return ret == -ECANCELED ? -EINVAL : ret;
916 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
918 return create_or_delete_trace_kprobe(cmd->seq.buffer);
922 * kprobe_event_cmd_init - Initialize a kprobe event command object
923 * @cmd: A pointer to the dynevent_cmd struct representing the new event
924 * @buf: A pointer to the buffer used to build the command
925 * @maxlen: The length of the buffer passed in @buf
927 * Initialize a kprobe event command object. Use this before
928 * calling any of the other kprobe_event functions.
930 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
932 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
933 trace_kprobe_run_command);
935 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
938 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
939 * @cmd: A pointer to the dynevent_cmd struct representing the new event
940 * @name: The name of the kprobe event
941 * @loc: The location of the kprobe event
942 * @kretprobe: Is this a return probe?
943 * @args: Variable number of arg (pairs), one pair for each field
945 * NOTE: Users normally won't want to call this function directly, but
946 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
947 * adds a NULL to the end of the arg list. If this function is used
948 * directly, make sure the last arg in the variable arg list is NULL.
950 * Generate a kprobe event command to be executed by
951 * kprobe_event_gen_cmd_end(). This function can be used to generate the
952 * complete command or only the first part of it; in the latter case,
953 * kprobe_event_add_fields() can be used to add more fields following this.
955 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
956 * returns -EINVAL if @loc == NULL.
958 * Return: 0 if successful, error otherwise.
960 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
961 const char *name, const char *loc, ...)
963 char buf[MAX_EVENT_NAME_LEN];
964 struct dynevent_arg arg;
968 if (cmd->type != DYNEVENT_TYPE_KPROBE)
975 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
977 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
979 ret = dynevent_str_add(cmd, buf);
983 dynevent_arg_init(&arg, 0);
985 ret = dynevent_arg_add(cmd, &arg, NULL);
993 field = va_arg(args, const char *);
997 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1003 ret = dynevent_arg_add(cmd, &arg, NULL);
1011 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
1014 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1015 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1016 * @args: Variable number of arg (pairs), one pair for each field
1018 * NOTE: Users normally won't want to call this function directly, but
1019 * rather use the kprobe_event_add_fields() wrapper, which
1020 * automatically adds a NULL to the end of the arg list. If this
1021 * function is used directly, make sure the last arg in the variable arg list is NULL.
1024 * Add probe fields to an existing kprobe command using a variable
1025 * list of args. Fields are added in the same order they're listed.
1027 * Return: 0 if successful, error otherwise.
1029 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1031 struct dynevent_arg arg;
1035 if (cmd->type != DYNEVENT_TYPE_KPROBE)
1038 dynevent_arg_init(&arg, 0);
1040 va_start(args, cmd);
1044 field = va_arg(args, const char *);
1048 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1054 ret = dynevent_arg_add(cmd, &arg, NULL);
1062 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
1065 * kprobe_event_delete - Delete a kprobe event
1066 * @name: The name of the kprobe event to delete
1068 * Delete a kprobe event with the given @name from kernel code rather
1069 * than directly from the command line.
1071 * Return: 0 if successful, error otherwise.
1073 int kprobe_event_delete(const char *name)
1075 char buf[MAX_EVENT_NAME_LEN];
1077 snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1079 return create_or_delete_trace_kprobe(buf);
1081 EXPORT_SYMBOL_GPL(kprobe_event_delete);
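/*
 * A minimal sketch (not part of this file) tying the command API above
 * together, modeled on the in-kernel dynamic event API: build, register
 * and then delete a kprobe event. The probed symbol, the register names
 * and the helper itself are assumptions for illustration; error paths
 * are abbreviated.
 */
static int __maybe_unused kprobe_event_cmd_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	/* Builds "p:kprobes/gen_kprobe_test do_sys_open dfd=%ax filename=%dx" */
	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
					 "do_sys_open", "dfd=%ax");
	if (!ret)
		ret = kprobe_event_add_fields(&cmd, "filename=%dx");
	if (!ret)
		ret = kprobe_event_gen_cmd_end(&cmd);	/* runs the command */
	if (!ret)
		ret = kprobe_event_delete("gen_kprobe_test");

	kfree(buf);
	return ret;
}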
1083 static int trace_kprobe_release(struct dyn_event *ev)
1085 struct trace_kprobe *tk = to_trace_kprobe(ev);
1086 int ret = unregister_trace_kprobe(tk);
1089 free_trace_kprobe(tk);
1093 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1095 struct trace_kprobe *tk = to_trace_kprobe(ev);
1098 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1099 if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1100 seq_printf(m, "%d", tk->rp.maxactive);
1101 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1102 trace_probe_name(&tk->tp));
1105 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1106 else if (tk->rp.kp.offset)
1107 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1110 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1112 for (i = 0; i < tk->tp.nr_args; i++)
1113 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1119 static int probes_seq_show(struct seq_file *m, void *v)
1121 struct dyn_event *ev = v;
1123 if (!is_trace_kprobe(ev))
1126 return trace_kprobe_show(m, ev);
1129 static const struct seq_operations probes_seq_op = {
1130 .start = dyn_event_seq_start,
1131 .next = dyn_event_seq_next,
1132 .stop = dyn_event_seq_stop,
1133 .show = probes_seq_show
1136 static int probes_open(struct inode *inode, struct file *file)
1140 ret = security_locked_down(LOCKDOWN_TRACEFS);
1144 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1145 ret = dyn_events_release_all(&trace_kprobe_ops);
1150 return seq_open(file, &probes_seq_op);
1153 static ssize_t probes_write(struct file *file, const char __user *buffer,
1154 size_t count, loff_t *ppos)
1156 return trace_parse_run_command(file, buffer, count, ppos,
1157 create_or_delete_trace_kprobe);
1160 static const struct file_operations kprobe_events_ops = {
1161 .owner = THIS_MODULE,
1162 .open = probes_open,
1164 .llseek = seq_lseek,
1165 .release = seq_release,
1166 .write = probes_write,
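/*
 * Illustrative usage of the "kprobe_events" file defined above, from a
 * shell (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 'p:myprobe do_sys_open dfd=%di' >> kprobe_events   # add an event
 *   echo '-:myprobe' >> kprobe_events                       # delete one
 *   echo > kprobe_events             # O_TRUNC clears all kprobe events
 */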
1169 /* Probes profiling interfaces */
1170 static int probes_profile_seq_show(struct seq_file *m, void *v)
1172 struct dyn_event *ev = v;
1173 struct trace_kprobe *tk;
1174 unsigned long nmissed;
1176 if (!is_trace_kprobe(ev))
1179 tk = to_trace_kprobe(ev);
1180 nmissed = trace_kprobe_is_return(tk) ?
1181 tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1182 seq_printf(m, " %-44s %15lu %15lu\n",
1183 trace_probe_name(&tk->tp),
1184 trace_kprobe_nhit(tk),
1190 static const struct seq_operations profile_seq_op = {
1191 .start = dyn_event_seq_start,
1192 .next = dyn_event_seq_next,
1193 .stop = dyn_event_seq_stop,
1194 .show = probes_profile_seq_show
1197 static int profile_open(struct inode *inode, struct file *file)
1201 ret = security_locked_down(LOCKDOWN_TRACEFS);
1205 return seq_open(file, &profile_seq_op);
1208 static const struct file_operations kprobe_profile_ops = {
1209 .owner = THIS_MODULE,
1210 .open = profile_open,
1212 .llseek = seq_lseek,
1213 .release = seq_release,
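/*
 * Reading "kprobe_profile" yields one line per event in the
 * probes_profile_seq_show() format above: event name, hit count and
 * missed count (for kretprobes, kprobe misses plus maxactive misses).
 * An illustrative line:
 *
 *   myprobe                                            1234            0
 */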
1216 /* Kprobe specific fetch functions */
1218 /* Return the length of the string, including the terminating NUL byte */
1219 static nokprobe_inline int
1220 fetch_store_strlen_user(unsigned long addr)
1222 const void __user *uaddr = (__force const void __user *)addr;
1224 return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1227 /* Return the length of the string, including the terminating NUL byte */
1228 static nokprobe_inline int
1229 fetch_store_strlen(unsigned long addr)
1234 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1235 if (addr < TASK_SIZE)
1236 return fetch_store_strlen_user(addr);
1240 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1242 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1244 return (ret < 0) ? ret : len;
1248 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1249 * with max length and relative data location.
1251 static nokprobe_inline int
1252 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1254 const void __user *uaddr = (__force const void __user *)addr;
1255 int maxlen = get_loc_len(*(u32 *)dest);
1259 if (unlikely(!maxlen))
1262 __dest = get_loc_data(dest, base);
1264 ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1266 *(u32 *)dest = make_data_loc(ret, __dest - base);
1272 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1273 * length and relative data location.
1275 static nokprobe_inline int
1276 fetch_store_string(unsigned long addr, void *dest, void *base)
1278 int maxlen = get_loc_len(*(u32 *)dest);
1282 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1283 if ((unsigned long)addr < TASK_SIZE)
1284 return fetch_store_string_user(addr, dest, base);
1287 if (unlikely(!maxlen))
1290 __dest = get_loc_data(dest, base);
1293	 * Try to get the string again, since the string can be changed while probing.
1296 ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1298 *(u32 *)dest = make_data_loc(ret, __dest - base);
1303 static nokprobe_inline int
1304 probe_mem_read_user(void *dest, void *src, size_t size)
1306 const void __user *uaddr = (__force const void __user *)src;
1308 return copy_from_user_nofault(dest, uaddr, size);
1311 static nokprobe_inline int
1312 probe_mem_read(void *dest, void *src, size_t size)
1314 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1315 if ((unsigned long)src < TASK_SIZE)
1316 return probe_mem_read_user(dest, src, size);
1318 return copy_from_kernel_nofault(dest, src, size);
1321 /* Note that we don't verify it, since the code does not come from user space */
1323 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1326 struct pt_regs *regs = rec;
1330 /* 1st stage: get value from context */
1333 val = regs_get_register(regs, code->param);
1335 case FETCH_OP_STACK:
1336 val = regs_get_kernel_stack_nth(regs, code->param);
1338 case FETCH_OP_STACKP:
1339 val = kernel_stack_pointer(regs);
1341 case FETCH_OP_RETVAL:
1342 val = regs_return_value(regs);
1345 val = code->immediate;
1348 val = (unsigned long)current->comm;
1351 val = (unsigned long)code->data;
1353 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1355 val = regs_get_kernel_argument(regs, code->param);
1358	case FETCH_NOP_SYMBOL:	/* Ignore a placeholder */
1366 return process_fetch_insn_bottom(code, val, dest, base);
1368 NOKPROBE_SYMBOL(process_fetch_insn)
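/*
 * Illustrative walk-through (an assumption about the generated code, not
 * taken from this file): for an argument spec such as "+8($stack):u32",
 * the first fetch_insn is FETCH_OP_STACKP, so the switch above loads the
 * kernel stack pointer, and process_fetch_insn_bottom() then dereferences
 * it at offset 8 and stores the result as a u32 into the trace buffer.
 */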
1370 /* Kprobe handler */
1371 static nokprobe_inline void
1372 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1373 struct trace_event_file *trace_file)
1375 struct kprobe_trace_entry_head *entry;
1376 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1377 struct trace_event_buffer fbuffer;
1380 WARN_ON(call != trace_file->event_call);
1382 if (trace_trigger_soft_disabled(trace_file))
1385 dsize = __get_data_size(&tk->tp, regs);
1387 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1388 sizeof(*entry) + tk->tp.size + dsize);
1392 fbuffer.regs = regs;
1393 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1394 entry->ip = (unsigned long)tk->rp.kp.addr;
1395 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1397 trace_event_buffer_commit(&fbuffer);
1401 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1403 struct event_file_link *link;
1405 trace_probe_for_each_link_rcu(link, &tk->tp)
1406 __kprobe_trace_func(tk, regs, link->file);
1408 NOKPROBE_SYMBOL(kprobe_trace_func);
1410 /* Kretprobe handler */
1411 static nokprobe_inline void
1412 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1413 struct pt_regs *regs,
1414 struct trace_event_file *trace_file)
1416 struct kretprobe_trace_entry_head *entry;
1417 struct trace_event_buffer fbuffer;
1418 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1421 WARN_ON(call != trace_file->event_call);
1423 if (trace_trigger_soft_disabled(trace_file))
1426 dsize = __get_data_size(&tk->tp, regs);
1428 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1429 sizeof(*entry) + tk->tp.size + dsize);
1433 fbuffer.regs = regs;
1434 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1435 entry->func = (unsigned long)tk->rp.kp.addr;
1436 entry->ret_ip = get_kretprobe_retaddr(ri);
1437 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1439 trace_event_buffer_commit(&fbuffer);
1443 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1444 struct pt_regs *regs)
1446 struct event_file_link *link;
1448 trace_probe_for_each_link_rcu(link, &tk->tp)
1449 __kretprobe_trace_func(tk, ri, regs, link->file);
1451 NOKPROBE_SYMBOL(kretprobe_trace_func);
1453 /* Event entry printers */
1454 static enum print_line_t
1455 print_kprobe_event(struct trace_iterator *iter, int flags,
1456 struct trace_event *event)
1458 struct kprobe_trace_entry_head *field;
1459 struct trace_seq *s = &iter->seq;
1460 struct trace_probe *tp;
1462 field = (struct kprobe_trace_entry_head *)iter->ent;
1463 tp = trace_probe_primary_from_call(
1464 container_of(event, struct trace_event_call, event));
1465 if (WARN_ON_ONCE(!tp))
1468 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1470 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1473 trace_seq_putc(s, ')');
1475 if (print_probe_args(s, tp->args, tp->nr_args,
1476 (u8 *)&field[1], field) < 0)
1479 trace_seq_putc(s, '\n');
1481 return trace_handle_return(s);
1484 static enum print_line_t
1485 print_kretprobe_event(struct trace_iterator *iter, int flags,
1486 struct trace_event *event)
1488 struct kretprobe_trace_entry_head *field;
1489 struct trace_seq *s = &iter->seq;
1490 struct trace_probe *tp;
1492 field = (struct kretprobe_trace_entry_head *)iter->ent;
1493 tp = trace_probe_primary_from_call(
1494 container_of(event, struct trace_event_call, event));
1495 if (WARN_ON_ONCE(!tp))
1498 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1500 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1503 trace_seq_puts(s, " <- ");
1505 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1508 trace_seq_putc(s, ')');
1510 if (print_probe_args(s, tp->args, tp->nr_args,
1511 (u8 *)&field[1], field) < 0)
1514 trace_seq_putc(s, '\n');
1517 return trace_handle_return(s);
1521 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1524 struct kprobe_trace_entry_head field;
1525 struct trace_probe *tp;
1527 tp = trace_probe_primary_from_call(event_call);
1528 if (WARN_ON_ONCE(!tp))
1531 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1533 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1536 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1539 struct kretprobe_trace_entry_head field;
1540 struct trace_probe *tp;
1542 tp = trace_probe_primary_from_call(event_call);
1543 if (WARN_ON_ONCE(!tp))
1546 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1547 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1549 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
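/*
 * Consequently a kprobe event record starts with an "ip" field (named by
 * FIELD_STRING_IP) and a kretprobe record with "func"/"ret_ip" fields
 * (FIELD_STRING_FUNC/FIELD_STRING_RETIP), each followed by one field per
 * probe argument defined by traceprobe_define_arg_fields().
 */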
1552 #ifdef CONFIG_PERF_EVENTS
1554 /* Kprobe profile handler */
1556 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1558 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1559 struct kprobe_trace_entry_head *entry;
1560 struct hlist_head *head;
1561 int size, __size, dsize;
1564 if (bpf_prog_array_valid(call)) {
1565 unsigned long orig_ip = instruction_pointer(regs);
1568 ret = trace_call_bpf(call, regs);
1571 * We need to check and see if we modified the pc of the
1572		 * pt_regs, and if so return 1 so that we don't do the single stepping.
1575 if (orig_ip != instruction_pointer(regs))
1581 head = this_cpu_ptr(call->perf_events);
1582 if (hlist_empty(head))
1585 dsize = __get_data_size(&tk->tp, regs);
1586 __size = sizeof(*entry) + tk->tp.size + dsize;
1587 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1588 size -= sizeof(u32);
1590 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1594 entry->ip = (unsigned long)tk->rp.kp.addr;
1595 memset(&entry[1], 0, dsize);
1596 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1597 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1601 NOKPROBE_SYMBOL(kprobe_perf_func);
1603 /* Kretprobe profile handler */
1605 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1606 struct pt_regs *regs)
1608 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1609 struct kretprobe_trace_entry_head *entry;
1610 struct hlist_head *head;
1611 int size, __size, dsize;
1614 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1617 head = this_cpu_ptr(call->perf_events);
1618 if (hlist_empty(head))
1621 dsize = __get_data_size(&tk->tp, regs);
1622 __size = sizeof(*entry) + tk->tp.size + dsize;
1623 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1624 size -= sizeof(u32);
1626 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1630 entry->func = (unsigned long)tk->rp.kp.addr;
1631 entry->ret_ip = get_kretprobe_retaddr(ri);
1632 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1633 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1636 NOKPROBE_SYMBOL(kretprobe_perf_func);
1638 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1639 const char **symbol, u64 *probe_offset,
1640 u64 *probe_addr, bool perf_type_tracepoint)
1642 const char *pevent = trace_event_name(event->tp_event);
1643 const char *group = event->tp_event->class->system;
1644 struct trace_kprobe *tk;
1646 if (perf_type_tracepoint)
1647 tk = find_trace_kprobe(pevent, group);
1649 tk = trace_kprobe_primary_from_call(event->tp_event);
1653 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1654 : BPF_FD_TYPE_KPROBE;
1656 *symbol = tk->symbol;
1657 *probe_offset = tk->rp.kp.offset;
1662 *probe_addr = (unsigned long)tk->rp.kp.addr;
1666 #endif /* CONFIG_PERF_EVENTS */
1669 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1671 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1672 * lockless, but we can't race with this __init function.
1674 static int kprobe_register(struct trace_event_call *event,
1675 enum trace_reg type, void *data)
1677 struct trace_event_file *file = data;
1680 case TRACE_REG_REGISTER:
1681 return enable_trace_kprobe(event, file);
1682 case TRACE_REG_UNREGISTER:
1683 return disable_trace_kprobe(event, file);
1685 #ifdef CONFIG_PERF_EVENTS
1686 case TRACE_REG_PERF_REGISTER:
1687 return enable_trace_kprobe(event, NULL);
1688 case TRACE_REG_PERF_UNREGISTER:
1689 return disable_trace_kprobe(event, NULL);
1690 case TRACE_REG_PERF_OPEN:
1691 case TRACE_REG_PERF_CLOSE:
1692 case TRACE_REG_PERF_ADD:
1693 case TRACE_REG_PERF_DEL:
1700 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1702 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1705 raw_cpu_inc(*tk->nhit);
1707 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1708 kprobe_trace_func(tk, regs);
1709 #ifdef CONFIG_PERF_EVENTS
1710 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1711 ret = kprobe_perf_func(tk, regs);
1715 NOKPROBE_SYMBOL(kprobe_dispatcher);
1718 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1720 struct kretprobe *rp = get_kretprobe(ri);
1721 struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
1723 raw_cpu_inc(*tk->nhit);
1725 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1726 kretprobe_trace_func(tk, ri, regs);
1727 #ifdef CONFIG_PERF_EVENTS
1728 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1729 kretprobe_perf_func(tk, ri, regs);
1731 return 0; /* We don't tweak kernel, so just return 0 */
1733 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1735 static struct trace_event_functions kretprobe_funcs = {
1736 .trace = print_kretprobe_event
1739 static struct trace_event_functions kprobe_funcs = {
1740 .trace = print_kprobe_event
1743 static struct trace_event_fields kretprobe_fields_array[] = {
1744 { .type = TRACE_FUNCTION_TYPE,
1745 .define_fields = kretprobe_event_define_fields },
1749 static struct trace_event_fields kprobe_fields_array[] = {
1750 { .type = TRACE_FUNCTION_TYPE,
1751 .define_fields = kprobe_event_define_fields },
1755 static inline void init_trace_event_call(struct trace_kprobe *tk)
1757 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1759 if (trace_kprobe_is_return(tk)) {
1760 call->event.funcs = &kretprobe_funcs;
1761 call->class->fields_array = kretprobe_fields_array;
1763 call->event.funcs = &kprobe_funcs;
1764 call->class->fields_array = kprobe_fields_array;
1767 call->flags = TRACE_EVENT_FL_KPROBE;
1768 call->class->reg = kprobe_register;
1771 static int register_kprobe_event(struct trace_kprobe *tk)
1773 init_trace_event_call(tk);
1775 return trace_probe_register_event_call(&tk->tp);
1778 static int unregister_kprobe_event(struct trace_kprobe *tk)
1780 return trace_probe_unregister_event_call(&tk->tp);
1783 #ifdef CONFIG_PERF_EVENTS
1784 /* create a trace_kprobe, but don't add it to global lists */
1785 struct trace_event_call *
1786 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1789 enum probe_print_type ptype;
1790 struct trace_kprobe *tk;
1795	 * local trace_kprobes are not added to dyn_event, so they are never
1796	 * found by find_trace_kprobe(). Therefore, there is no concern about
1797	 * duplicated names here.
1799 event = func ? func : "DUMMY_EVENT";
1801 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1802 offs, 0 /* maxactive */, 0 /* nargs */,
1806 pr_info("Failed to allocate trace_probe.(%d)\n",
1808 return ERR_CAST(tk);
1811 init_trace_event_call(tk);
1813 ptype = trace_kprobe_is_return(tk) ?
1814 PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1815 if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1820 ret = __register_trace_kprobe(tk);
1824 return trace_probe_event_call(&tk->tp);
1826 free_trace_kprobe(tk);
1827 return ERR_PTR(ret);
1830 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1832 struct trace_kprobe *tk;
1834 tk = trace_kprobe_primary_from_call(event_call);
1838 if (trace_probe_is_enabled(&tk->tp)) {
1843 __unregister_trace_kprobe(tk);
1845 free_trace_kprobe(tk);
1847 #endif /* CONFIG_PERF_EVENTS */
1849 static __init void enable_boot_kprobe_events(void)
1851 struct trace_array *tr = top_trace_array();
1852 struct trace_event_file *file;
1853 struct trace_kprobe *tk;
1854 struct dyn_event *pos;
1856 mutex_lock(&event_mutex);
1857 for_each_trace_kprobe(tk, pos) {
1858 list_for_each_entry(file, &tr->events, list)
1859 if (file->event_call == trace_probe_event_call(&tk->tp))
1860 trace_event_enable_disable(file, 1, 0);
1862 mutex_unlock(&event_mutex);
1865 static __init void setup_boot_kprobe_events(void)
1867 char *p, *cmd = kprobe_boot_events_buf;
1870 strreplace(kprobe_boot_events_buf, ',', ' ');
1872 while (cmd && *cmd != '\0') {
1873 p = strchr(cmd, ';');
1877 ret = create_or_delete_trace_kprobe(cmd);
1879 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1884 enable_boot_kprobe_events();
1888 * Register dynevent at core_initcall. This allows the kernel to set up
1889 * kprobe events at postcore_initcall without tracefs.
1891 static __init int init_kprobe_trace_early(void)
1895 ret = dyn_event_register(&trace_kprobe_ops);
1899 if (register_module_notifier(&trace_kprobe_module_nb))
1904 core_initcall(init_kprobe_trace_early);
1906 /* Make a tracefs interface for controlling probe points */
1907 static __init int init_kprobe_trace(void)
1910 struct dentry *entry;
1912 ret = tracing_init_dentry();
1916 entry = tracefs_create_file("kprobe_events", TRACE_MODE_WRITE,
1917 NULL, NULL, &kprobe_events_ops);
1919 /* Event list interface */
1921 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1923 /* Profile interface */
1924 entry = tracefs_create_file("kprobe_profile", TRACE_MODE_READ,
1925 NULL, NULL, &kprobe_profile_ops);
1928 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1930 setup_boot_kprobe_events();
1934 fs_initcall(init_kprobe_trace);
1937 #ifdef CONFIG_FTRACE_STARTUP_TEST
1938 static __init struct trace_event_file *
1939 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1941 struct trace_event_file *file;
1943 list_for_each_entry(file, &tr->events, list)
1944 if (file->event_call == trace_probe_event_call(&tk->tp))
1951 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1952 * stage, so we can do this lockless.
1954 static __init int kprobe_trace_self_tests_init(void)
1957 int (*target)(int, int, int, int, int, int);
1958 struct trace_kprobe *tk;
1959 struct trace_event_file *file;
1961 if (tracing_is_disabled())
1964 if (tracing_selftest_disabled)
1967 target = kprobe_trace_selftest_target;
1969 pr_info("Testing kprobe tracing: ");
1971 ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1972 if (WARN_ON_ONCE(ret)) {
1973 pr_warn("error on probing function entry.\n");
1976 /* Enable trace point */
1977 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1978 if (WARN_ON_ONCE(tk == NULL)) {
1979 pr_warn("error on getting new probe.\n");
1982 file = find_trace_probe_file(tk, top_trace_array());
1983 if (WARN_ON_ONCE(file == NULL)) {
1984 pr_warn("error on getting probe file.\n");
1987 enable_trace_kprobe(
1988 trace_probe_event_call(&tk->tp), file);
1992 ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
1993 if (WARN_ON_ONCE(ret)) {
1994 pr_warn("error on probing function return.\n");
1997 /* Enable trace point */
1998 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1999 if (WARN_ON_ONCE(tk == NULL)) {
2000 pr_warn("error on getting 2nd new probe.\n");
2003 file = find_trace_probe_file(tk, top_trace_array());
2004 if (WARN_ON_ONCE(file == NULL)) {
2005 pr_warn("error on getting probe file.\n");
2008 enable_trace_kprobe(
2009 trace_probe_event_call(&tk->tp), file);
2016 ret = target(1, 2, 3, 4, 5, 6);
2019	 * Not expecting an error here; the check is only to prevent the
2020	 * optimizer from removing the call to target(), as otherwise there
2021	 * are no side effects and the call would never be performed.
2026	/* Disable trace points before removing them */
2027 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2028 if (WARN_ON_ONCE(tk == NULL)) {
2029 pr_warn("error on getting test probe.\n");
2032 if (trace_kprobe_nhit(tk) != 1) {
2033 pr_warn("incorrect number of testprobe hits\n");
2037 file = find_trace_probe_file(tk, top_trace_array());
2038 if (WARN_ON_ONCE(file == NULL)) {
2039 pr_warn("error on getting probe file.\n");
2042 disable_trace_kprobe(
2043 trace_probe_event_call(&tk->tp), file);
2046 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2047 if (WARN_ON_ONCE(tk == NULL)) {
2048 pr_warn("error on getting 2nd test probe.\n");
2051 if (trace_kprobe_nhit(tk) != 1) {
2052 pr_warn("incorrect number of testprobe2 hits\n");
2056 file = find_trace_probe_file(tk, top_trace_array());
2057 if (WARN_ON_ONCE(file == NULL)) {
2058 pr_warn("error on getting probe file.\n");
2061 disable_trace_kprobe(
2062 trace_probe_event_call(&tk->tp), file);
2065 ret = create_or_delete_trace_kprobe("-:testprobe");
2066 if (WARN_ON_ONCE(ret)) {
2067 pr_warn("error on deleting a probe.\n");
2071 ret = create_or_delete_trace_kprobe("-:testprobe2");
2072 if (WARN_ON_ONCE(ret)) {
2073 pr_warn("error on deleting a probe.\n");
2078 ret = dyn_events_release_all(&trace_kprobe_ops);
2079 if (WARN_ON_ONCE(ret)) {
2080 pr_warn("error on cleaning up probes.\n");
2084 * Wait for the optimizer work to finish. Otherwise it might fiddle
2085 * with probes in already freed __init text.
2087 wait_for_kprobe_optimizer();
2089 pr_cont("NG: Some tests are failed. Please check them.\n");
2095 late_initcall(kprobe_trace_self_tests_init);