1 // SPDX-License-Identifier: GPL-2.0
5 * Part of this code was copied from kernel/trace/trace_kprobe.c written by
6 * Masami Hiramatsu <mhiramat@kernel.org>
8 * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
9 * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/ftrace.h>
16 #include "trace_dynevent.h"
17 #include "trace_probe.h"
18 #include "trace_probe_tmpl.h"
20 #define EPROBE_EVENT_SYSTEM "eprobes"
23 /* tracepoint system */
24 const char *event_system;
26 /* tracepoint event */
27 const char *event_name;
29 struct trace_event_call *event;
31 struct dyn_event devent;
32 struct trace_probe tp;
36 struct trace_event_file *file;
37 struct trace_eprobe *ep;
40 static int __trace_eprobe_create(int argc, const char *argv[]);
42 static void trace_event_probe_cleanup(struct trace_eprobe *ep)
46 trace_probe_cleanup(&ep->tp);
47 kfree(ep->event_name);
48 kfree(ep->event_system);
50 trace_event_put_ref(ep->event);
54 static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
56 return container_of(ev, struct trace_eprobe, devent);
59 static int eprobe_dyn_event_create(const char *raw_command)
61 return trace_probe_create(raw_command, __trace_eprobe_create);
64 static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
66 struct trace_eprobe *ep = to_trace_eprobe(ev);
69 seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
70 trace_probe_name(&ep->tp));
71 seq_printf(m, " %s.%s", ep->event_system, ep->event_name);
73 for (i = 0; i < ep->tp.nr_args; i++)
74 seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
80 static int unregister_trace_eprobe(struct trace_eprobe *ep)
82 /* If other probes are on the event, just unregister eprobe */
83 if (trace_probe_has_sibling(&ep->tp))
86 /* Enabled event can not be unregistered */
87 if (trace_probe_is_enabled(&ep->tp))
90 /* Will fail if probe is being used by ftrace or perf */
91 if (trace_probe_unregister_event_call(&ep->tp))
95 dyn_event_remove(&ep->devent);
96 trace_probe_unlink(&ep->tp);
/* dyn_event .free callback: unregister, then free on success only. */
static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int ret = unregister_trace_eprobe(ep);

	if (!ret)
		trace_event_probe_cleanup(ep);
	return ret;
}
111 static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
113 struct trace_eprobe *ep = to_trace_eprobe(ev);
115 return trace_probe_is_enabled(&ep->tp);
118 static bool eprobe_dyn_event_match(const char *system, const char *event,
119 int argc, const char **argv, struct dyn_event *ev)
121 struct trace_eprobe *ep = to_trace_eprobe(ev);
125 * We match the following:
126 * event only - match all eprobes with event name
127 * system and event only - match all system/event probes
129 * The below has the above satisfied with more arguments:
131 * attached system/event - If the arg has the system and event
132 * the probe is attached to, match
133 * probes with the attachment.
135 * If any more args are given, then it requires a full match.
139 * If system exists, but this probe is not part of that system
142 if (system && strcmp(trace_probe_group_name(&ep->tp), system) != 0)
145 /* Must match the event name */
146 if (strcmp(trace_probe_name(&ep->tp), event) != 0)
149 /* No arguments match all */
153 /* First argument is the system/event the probe is attached to */
155 slash = strchr(argv[0], '/');
157 slash = strchr(argv[0], '.');
161 if (strncmp(ep->event_system, argv[0], slash - argv[0]))
163 if (strcmp(ep->event_name, slash + 1))
169 /* If there are no other args, then match */
173 return trace_probe_match_command_args(&ep->tp, argc, argv);
176 static struct dyn_event_operations eprobe_dyn_event_ops = {
177 .create = eprobe_dyn_event_create,
178 .show = eprobe_dyn_event_show,
179 .is_busy = eprobe_dyn_event_is_busy,
180 .free = eprobe_dyn_event_release,
181 .match = eprobe_dyn_event_match,
184 static struct trace_eprobe *alloc_event_probe(const char *group,
185 const char *this_event,
186 struct trace_event_call *event,
189 struct trace_eprobe *ep;
190 const char *event_name;
191 const char *sys_name;
195 return ERR_PTR(-ENODEV);
197 sys_name = event->class->system;
198 event_name = trace_event_name(event);
200 ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
202 trace_event_put_ref(event);
206 ep->event_name = kstrdup(event_name, GFP_KERNEL);
209 ep->event_system = kstrdup(sys_name, GFP_KERNEL);
210 if (!ep->event_system)
213 ret = trace_probe_init(&ep->tp, this_event, group, false);
217 dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
220 trace_event_probe_cleanup(ep);
224 static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
226 struct probe_arg *parg = &ep->tp.args[i];
227 struct ftrace_event_field *field;
228 struct list_head *head;
230 head = trace_get_fields(ep->event);
231 list_for_each_entry(field, head, link) {
232 if (!strcmp(parg->code->data, field->name)) {
233 kfree(parg->code->data);
234 parg->code->data = field;
238 kfree(parg->code->data);
239 parg->code->data = NULL;
243 static int eprobe_event_define_fields(struct trace_event_call *event_call)
246 struct eprobe_trace_entry_head field;
247 struct trace_probe *tp;
249 tp = trace_probe_primary_from_call(event_call);
250 if (WARN_ON_ONCE(!tp))
253 DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0);
255 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
258 static struct trace_event_fields eprobe_fields_array[] = {
259 { .type = TRACE_FUNCTION_TYPE,
260 .define_fields = eprobe_event_define_fields },
264 /* Event entry printers */
265 static enum print_line_t
266 print_eprobe_event(struct trace_iterator *iter, int flags,
267 struct trace_event *event)
269 struct eprobe_trace_entry_head *field;
270 struct trace_event_call *pevent;
271 struct trace_event *probed_event;
272 struct trace_seq *s = &iter->seq;
273 struct trace_probe *tp;
275 field = (struct eprobe_trace_entry_head *)iter->ent;
276 tp = trace_probe_primary_from_call(
277 container_of(event, struct trace_event_call, event));
278 if (WARN_ON_ONCE(!tp))
281 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
283 probed_event = ftrace_find_event(field->type);
285 pevent = container_of(probed_event, struct trace_event_call, event);
286 trace_seq_printf(s, "%s.%s", pevent->class->system,
287 trace_event_name(pevent));
289 trace_seq_printf(s, "%u", field->type);
292 trace_seq_putc(s, ')');
294 if (print_probe_args(s, tp->args, tp->nr_args,
295 (u8 *)&field[1], field) < 0)
298 trace_seq_putc(s, '\n');
300 return trace_handle_return(s);
303 static unsigned long get_event_field(struct fetch_insn *code, void *rec)
305 struct ftrace_event_field *field = code->data;
309 addr = rec + field->offset;
311 switch (field->size) {
313 if (field->is_signed)
316 val = *(unsigned char *)addr;
319 if (field->is_signed)
320 val = *(short *)addr;
322 val = *(unsigned short *)addr;
325 if (field->is_signed)
328 val = *(unsigned int *)addr;
331 if (field->is_signed)
334 val = *(unsigned long *)addr;
340 static int get_eprobe_size(struct trace_probe *tp, void *rec)
342 struct probe_arg *arg;
345 for (i = 0; i < tp->nr_args; i++) {
347 if (unlikely(arg->dynamic)) {
350 val = get_event_field(arg->code, rec);
351 len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
360 /* Kprobe specific fetch functions */
362 /* Note that we don't verify it, since the code does not come from user space */
364 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
369 val = get_event_field(code, rec);
370 return process_fetch_insn_bottom(code + 1, val, dest, base);
372 NOKPROBE_SYMBOL(process_fetch_insn)
374 /* Return the length of string -- including null terminal byte */
375 static nokprobe_inline int
376 fetch_store_strlen_user(unsigned long addr)
378 const void __user *uaddr = (__force const void __user *)addr;
380 return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
383 /* Return the length of string -- including null terminal byte */
384 static nokprobe_inline int
385 fetch_store_strlen(unsigned long addr)
390 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
391 if (addr < TASK_SIZE)
392 return fetch_store_strlen_user(addr);
396 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
398 } while (c && ret == 0 && len < MAX_STRING_SIZE);
400 return (ret < 0) ? ret : len;
404 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
405 * with max length and relative data location.
407 static nokprobe_inline int
408 fetch_store_string_user(unsigned long addr, void *dest, void *base)
410 const void __user *uaddr = (__force const void __user *)addr;
411 int maxlen = get_loc_len(*(u32 *)dest);
415 if (unlikely(!maxlen))
418 __dest = get_loc_data(dest, base);
420 ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
422 *(u32 *)dest = make_data_loc(ret, __dest - base);
428 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
429 * length and relative data location.
431 static nokprobe_inline int
432 fetch_store_string(unsigned long addr, void *dest, void *base)
434 int maxlen = get_loc_len(*(u32 *)dest);
438 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
439 if ((unsigned long)addr < TASK_SIZE)
440 return fetch_store_string_user(addr, dest, base);
443 if (unlikely(!maxlen))
446 __dest = get_loc_data(dest, base);
449 * Try to get string again, since the string can be changed while
452 ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
454 *(u32 *)dest = make_data_loc(ret, __dest - base);
459 static nokprobe_inline int
460 probe_mem_read_user(void *dest, void *src, size_t size)
462 const void __user *uaddr = (__force const void __user *)src;
464 return copy_from_user_nofault(dest, uaddr, size);
467 static nokprobe_inline int
468 probe_mem_read(void *dest, void *src, size_t size)
470 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
471 if ((unsigned long)src < TASK_SIZE)
472 return probe_mem_read_user(dest, src, size);
474 return copy_from_kernel_nofault(dest, src, size);
/*
 * Record one eprobe hit: called (via the trigger stubs below) when the
 * attached source event fires, with @rec pointing at that event's record.
 * Copies the probe's fetched args into a new entry on @edata->file's
 * ring buffer.
 * NOTE(review): this chunk is truncated (several original lines are
 * missing, including parts of the lock_reserve argument list); comments
 * only describe what the visible lines show.
 */
479 __eprobe_trace_func(struct eprobe_data *edata, void *rec)
481 struct eprobe_trace_entry_head *entry;
482 struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
483 struct trace_event_buffer fbuffer;
/* Sanity: the eprobe's own call must match the file being written. */
486 if (WARN_ON_ONCE(call != edata->file->event_call))
/* Respect soft-disable / trigger gating on the destination file. */
489 if (trace_trigger_soft_disabled(edata->file))
492 fbuffer.trace_ctx = tracing_gen_ctx();
493 fbuffer.trace_file = edata->file;
/* Size of dynamic (string) data that must follow the fixed args. */
495 dsize = get_eprobe_size(&edata->ep->tp, rec);
499 trace_event_buffer_lock_reserve(&fbuffer.buffer, edata->file,
/* Entry layout: header + fixed probe args + dynamic data. */
501 sizeof(*entry) + edata->ep->tp.size + dsize,
506 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
/* Stamp the source event's type id so the printer can identify it. */
507 if (edata->ep->event)
508 entry->type = edata->ep->event->event.type;
511 store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
513 trace_event_buffer_commit(&fbuffer);
/*
 * The event probe implementation uses event triggers to get access to
 * the event it is attached to, but is not an actual trigger. The below
 * functions are just stubs to fulfill what is needed to use the trigger
 * infrastructure.
 */
static int eprobe_trigger_init(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	return 0;
}
/* Stub: eprobe trigger lifetime is managed by the eprobe itself. */
static void eprobe_trigger_free(struct event_trigger_ops *ops,
				struct event_trigger_data *data)
{
}
static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_ops *ops,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}
542 static void eprobe_trigger_func(struct event_trigger_data *data,
543 struct trace_buffer *buffer, void *rec,
544 struct ring_buffer_event *rbe)
546 struct eprobe_data *edata = data->private_data;
548 __eprobe_trace_func(edata, rec);
551 static struct event_trigger_ops eprobe_trigger_ops = {
552 .func = eprobe_trigger_func,
553 .print = eprobe_trigger_print,
554 .init = eprobe_trigger_init,
555 .free = eprobe_trigger_free,
/* Stub: eprobe triggers are never created via the trigger file. */
static int eprobe_trigger_cmd_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	return -1;
}
/* Stub: registration is done internally by enable_eprobe(). */
static int eprobe_trigger_reg_func(char *glob, struct event_trigger_ops *ops,
				   struct event_trigger_data *data,
				   struct trace_event_file *file)
{
	return -1;
}
/* Stub: unregistration is done internally by disable_eprobe(). */
static void eprobe_trigger_unreg_func(char *glob, struct event_trigger_ops *ops,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{
}
579 static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
582 return &eprobe_trigger_ops;
585 static struct event_command event_trigger_cmd = {
587 .trigger_type = ETT_EVENT_EPROBE,
588 .flags = EVENT_CMD_FL_NEEDS_REC,
589 .func = eprobe_trigger_cmd_func,
590 .reg = eprobe_trigger_reg_func,
591 .unreg = eprobe_trigger_unreg_func,
593 .get_trigger_ops = eprobe_trigger_get_ops,
597 static struct event_trigger_data *
598 new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
600 struct event_trigger_data *trigger;
601 struct eprobe_data *edata;
603 edata = kzalloc(sizeof(*edata), GFP_KERNEL);
604 trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
605 if (!trigger || !edata) {
608 return ERR_PTR(-ENOMEM);
611 trigger->flags = EVENT_TRIGGER_FL_PROBE;
613 trigger->ops = &eprobe_trigger_ops;
616 * EVENT PROBE triggers are not registered as commands with
617 * register_event_command(), as they are not controlled by the user
618 * from the trigger file
620 trigger->cmd_ops = &event_trigger_cmd;
622 INIT_LIST_HEAD(&trigger->list);
623 RCU_INIT_POINTER(trigger->filter, NULL);
627 trigger->private_data = edata;
632 static int enable_eprobe(struct trace_eprobe *ep,
633 struct trace_event_file *eprobe_file)
635 struct event_trigger_data *trigger;
636 struct trace_event_file *file;
637 struct trace_array *tr = eprobe_file->tr;
639 file = find_event_file(tr, ep->event_system, ep->event_name);
642 trigger = new_eprobe_trigger(ep, eprobe_file);
644 return PTR_ERR(trigger);
646 list_add_tail_rcu(&trigger->list, &file->triggers);
648 trace_event_trigger_enable_disable(file, 1);
649 update_cond_flag(file);
654 static struct trace_event_functions eprobe_funcs = {
655 .trace = print_eprobe_event
658 static int disable_eprobe(struct trace_eprobe *ep,
659 struct trace_array *tr)
661 struct event_trigger_data *trigger;
662 struct trace_event_file *file;
663 struct eprobe_data *edata;
665 file = find_event_file(tr, ep->event_system, ep->event_name);
669 list_for_each_entry(trigger, &file->triggers, list) {
670 if (!(trigger->flags & EVENT_TRIGGER_FL_PROBE))
672 edata = trigger->private_data;
676 if (list_entry_is_head(trigger, &file->triggers, list))
679 list_del_rcu(&trigger->list);
681 trace_event_trigger_enable_disable(file, 0);
682 update_cond_flag(file);
684 /* Make sure nothing is using the edata or trigger */
685 tracepoint_synchronize_unregister();
693 static int enable_trace_eprobe(struct trace_event_call *call,
694 struct trace_event_file *file)
696 struct trace_probe *pos, *tp;
697 struct trace_eprobe *ep;
701 tp = trace_probe_primary_from_call(call);
702 if (WARN_ON_ONCE(!tp))
704 enabled = trace_probe_is_enabled(tp);
706 /* This also changes "enabled" state */
708 ret = trace_probe_add_file(tp, file);
712 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
717 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
718 ep = container_of(pos, struct trace_eprobe, tp);
719 ret = enable_eprobe(ep, file);
726 /* Failed to enable one of them. Roll back all */
728 disable_eprobe(ep, file->tr);
730 trace_probe_remove_file(tp, file);
732 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
738 static int disable_trace_eprobe(struct trace_event_call *call,
739 struct trace_event_file *file)
741 struct trace_probe *pos, *tp;
742 struct trace_eprobe *ep;
744 tp = trace_probe_primary_from_call(call);
745 if (WARN_ON_ONCE(!tp))
749 if (!trace_probe_get_file_link(tp, file))
751 if (!trace_probe_has_single_file(tp))
753 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
755 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
757 if (!trace_probe_is_enabled(tp)) {
758 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
759 ep = container_of(pos, struct trace_eprobe, tp);
760 disable_eprobe(ep, file->tr);
767 * Synchronization is done in below function. For perf event,
768 * file == NULL and perf_trace_event_unreg() calls
769 * tracepoint_synchronize_unregister() to ensure synchronize
770 * event. We don't need to care about it.
772 trace_probe_remove_file(tp, file);
777 static int eprobe_register(struct trace_event_call *event,
778 enum trace_reg type, void *data)
780 struct trace_event_file *file = data;
783 case TRACE_REG_REGISTER:
784 return enable_trace_eprobe(event, file);
785 case TRACE_REG_UNREGISTER:
786 return disable_trace_eprobe(event, file);
787 #ifdef CONFIG_PERF_EVENTS
788 case TRACE_REG_PERF_REGISTER:
789 case TRACE_REG_PERF_UNREGISTER:
790 case TRACE_REG_PERF_OPEN:
791 case TRACE_REG_PERF_CLOSE:
792 case TRACE_REG_PERF_ADD:
793 case TRACE_REG_PERF_DEL:
800 static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
802 struct trace_event_call *call = trace_probe_event_call(&ep->tp);
804 call->flags = TRACE_EVENT_FL_EPROBE;
805 call->event.funcs = &eprobe_funcs;
806 call->class->fields_array = eprobe_fields_array;
807 call->class->reg = eprobe_register;
810 static struct trace_event_call *
811 find_and_get_event(const char *system, const char *event_name)
813 struct trace_event_call *tp_event;
816 list_for_each_entry(tp_event, &ftrace_events, list) {
817 /* Skip other probes and ftrace events */
818 if (tp_event->flags &
819 (TRACE_EVENT_FL_IGNORE_ENABLE |
820 TRACE_EVENT_FL_KPROBE |
821 TRACE_EVENT_FL_UPROBE |
822 TRACE_EVENT_FL_EPROBE))
824 if (!tp_event->class->system ||
825 strcmp(system, tp_event->class->system))
827 name = trace_event_name(tp_event);
828 if (!name || strcmp(event_name, name))
830 if (!trace_event_try_get_ref(tp_event)) {
840 static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
842 unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT;
845 ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags);
849 if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
850 ret = trace_eprobe_tp_arg_update(ep, i);
/*
 * Parse an "e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS]" command, allocate
 * the eprobe, parse its arguments against the attached event's fields,
 * and register it as a dynamic event.
 * NOTE(review): this chunk is truncated (error-path lines, braces and
 * labels are missing); comments describe only what the visible lines
 * show.
 */
855 static int __trace_eprobe_create(int argc, const char *argv[])
859 * e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS]
861 * <name>=$<field>[:TYPE]
863 const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
864 const char *sys_event = NULL, *sys_name = NULL;
865 struct trace_event_call *event_call;
866 struct trace_eprobe *ep = NULL;
867 char buf1[MAX_EVENT_NAME_LEN];
868 char buf2[MAX_EVENT_NAME_LEN];
/* Command must start with 'e' and carry at least SYSTEM.EVENT. */
872 if (argc < 2 || argv[0][0] != 'e')
875 trace_probe_log_init("event_probe", argc, argv);
/* Optional ":GRP/NAME" after the leading 'e'. */
877 event = strchr(&argv[0][1], ':');
880 ret = traceprobe_parse_event_name(&event, &group, buf1,
/* No explicit name: derive the probe name from SYSTEM.EVENT. */
885 strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
886 sanitize_event_name(buf1);
889 if (!is_good_name(event) || !is_good_name(group))
/* Split argv[1] into the attached system and event names. */
893 ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2,
894 sys_event - argv[1]);
895 if (ret || !sys_name)
897 if (!is_good_name(sys_event) || !is_good_name(sys_name))
/* event_mutex protects the ftrace_events list during lookup. */
900 mutex_lock(&event_mutex);
901 event_call = find_and_get_event(sys_name, sys_event);
902 ep = alloc_event_probe(group, event, event_call, argc - 2);
903 mutex_unlock(&event_mutex);
907 /* This must return -ENOMEM, else there is a bug */
908 WARN_ON_ONCE(ret != -ENOMEM);
/* Remaining arguments are the fetch args. */
913 argc -= 2; argv += 2;
914 /* parse arguments */
915 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
916 trace_probe_log_set_index(i + 2);
917 ret = trace_eprobe_tp_update_arg(ep, argv, i);
921 ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
924 init_trace_eprobe_call(ep);
925 mutex_lock(&event_mutex);
926 ret = trace_probe_register_event_call(&ep->tp);
928 if (ret == -EEXIST) {
929 trace_probe_log_set_index(0);
930 trace_probe_log_err(0, EVENT_EXIST);
932 mutex_unlock(&event_mutex);
935 ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
936 mutex_unlock(&event_mutex);
/* Error path: release everything allocated so far. */
941 trace_event_probe_cleanup(ep);
946 * Register dynevent at core_initcall. This allows kernel to setup eprobe
947 * events in postcore_initcall without tracefs.
949 static __init int trace_events_eprobe_init_early(void)
953 err = dyn_event_register(&eprobe_dyn_event_ops);
955 pr_warn("Could not register eprobe_dyn_event_ops\n");
959 core_initcall(trace_events_eprobe_init_early);