// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "trace_uprobe: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"
struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
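/*
 * Entry layout implied by the macros above: a regular probe records only the
 * probed instruction pointer in vaddr[0], while a return probe records the
 * function address in vaddr[0] and the return address in vaddr[1].  The
 * fetched argument data is appended right after this fixed-size header (see
 * __uprobe_trace_func() and __uprobe_perf_func() below).
 */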
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}
/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
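/*
 * The u32 "data location" word packs the copied length into its upper 16 bits
 * and the offset of the data from the start of the trace entry into its lower
 * 16 bits (see make_data_loc()/get_loc_len()/get_loc_data() in trace_probe.h).
 */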
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}
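/*
 * The dispatchers below stash a struct uprobe_dispatch_data pointer in
 * current->utask->vaddr before fetching arguments.  This lets "@+OFFSET"
 * fetch arguments (a file offset in the probed binary) be translated into a
 * virtual address: the mapping base is the breakpoint address minus the
 * probe's own file offset.
 */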
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage: process this value */
	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
	    (!system || strcmp(tu->tp.call.class->system, system) == 0);
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !group)
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	if (!tu)
		return;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	dyn_event_remove(&tu->devent);
	free_trace_uprobe(tu);
	return 0;
}
/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the inode
 * and offset match, the reference counter offset *must* match as well.  There
 * is one exception: if the user is replacing an old trace_uprobe with a new
 * one (same group/event), the same uprobe may be reused with a new reference
 * counter as long as the new one does not conflict with any other existing
 * ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	for_each_trace_uprobe(tmp, pos) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
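/*
 * For example (the path and offset below are purely illustrative), writing
 *	p:myprobe /bin/bash:0x4245c0 %ip %sp
 * to the uprobe_events file created in init_uprobe_trace() defines the event
 * uprobes/myprobe, and writing
 *	-:myprobe
 * removes it again.
 */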
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p' || argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}
	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}
/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
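/*
 * A listing entry produced above looks roughly like this (path, offset,
 * reference counter offset and argument are illustrative only):
 *	p:uprobes/myprobe /bin/bash:0x00000000004245c0(0x3640) arg1=%ax
 */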
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
		   trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;
static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}
static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}
static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}
/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
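/*
 * Shared by the ftrace and perf paths: trace_uprobe_register() calls this
 * with a trace_event_file for ftrace events, or with file == NULL and the
 * perf filter callback for perf events.
 */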
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}
static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
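	/*
	 * perf raw records carry a u32 size word in front of the data and the
	 * whole record must stay u64-aligned, hence rounding (size + u32) up
	 * to a u64 boundary and subtracting the u32 again on the next line.
	 */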
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = (void *)tu;
}
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * found by find_probe_event(). Therefore, there is no concern of a
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}
void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);