1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8 #define pr_fmt(fmt) "trace_uprobe: " fmt
10 #include <linux/security.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/uprobes.h>
15 #include <linux/namei.h>
16 #include <linux/string.h>
17 #include <linux/rculist.h>
19 #include "trace_dynevent.h"
20 #include "trace_probe.h"
21 #include "trace_probe_tmpl.h"
23 #define UPROBE_EVENT_SYSTEM "uprobes"
25 struct uprobe_trace_entry_head {
26 struct trace_entry ent;
27 unsigned long vaddr[];
30 #define SIZEOF_TRACE_ENTRY(is_return) \
31 (sizeof(struct uprobe_trace_entry_head) + \
32 sizeof(unsigned long) * (is_return ? 2 : 1))
34 #define DATAOF_TRACE_ENTRY(entry, is_return) \
35 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
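/*
 * For a uretprobe entry, vaddr[0] holds the probed function address and
 * vaddr[1] the address it returned to; a plain uprobe entry only records
 * the instruction pointer in vaddr[0]. Fetched argument data is stored
 * immediately after the vaddr slots, which is what DATAOF_TRACE_ENTRY()
 * points at.
 */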
37 struct trace_uprobe_filter {
38 rwlock_t rwlock;
39 int nr_systemwide;
40 struct list_head perf_events;
41 };
43 static int trace_uprobe_create(int argc, const char **argv);
44 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
45 static int trace_uprobe_release(struct dyn_event *ev);
46 static bool trace_uprobe_is_busy(struct dyn_event *ev);
47 static bool trace_uprobe_match(const char *system, const char *event,
48 int argc, const char **argv, struct dyn_event *ev);
50 static struct dyn_event_operations trace_uprobe_ops = {
51 .create = trace_uprobe_create,
52 .show = trace_uprobe_show,
53 .is_busy = trace_uprobe_is_busy,
54 .free = trace_uprobe_release,
55 .match = trace_uprobe_match,
59 * uprobe event core functions
62 struct dyn_event devent;
63 struct uprobe_consumer consumer;
68 unsigned long ref_ctr_offset;
70 struct trace_probe tp;
73 static bool is_trace_uprobe(struct dyn_event *ev)
75 return ev->ops == &trace_uprobe_ops;
78 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
80 return container_of(ev, struct trace_uprobe, devent);
84 * for_each_trace_uprobe - iterate over the trace_uprobe list
85 * @pos: the struct trace_uprobe * for each entry
86 * @dpos: the struct dyn_event * to use as a loop cursor
88 #define for_each_trace_uprobe(pos, dpos) \
89 for_each_dyn_event(dpos) \
90 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
92 #define SIZEOF_TRACE_UPROBE(n) \
93 (offsetof(struct trace_uprobe, tp.args) + \
94 (sizeof(struct probe_arg) * (n)))
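/*
 * A trace_uprobe embeds a trace_probe whose args[] array is sized at
 * allocation time, so the allocation size depends on the number of
 * probe arguments (see alloc_trace_uprobe() below).
 */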
96 static int register_uprobe_event(struct trace_uprobe *tu);
97 static int unregister_uprobe_event(struct trace_uprobe *tu);
99 struct uprobe_dispatch_data {
100 struct trace_uprobe *tu;
101 unsigned long bp_addr;
104 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
105 static int uretprobe_dispatcher(struct uprobe_consumer *con,
106 unsigned long func, struct pt_regs *regs);
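/*
 * Helpers for $stackN fetch arguments: the N-th word is read relative to
 * the user stack pointer, walking toward older stack entries, so the
 * adjustment direction depends on CONFIG_STACK_GROWSUP.
 */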
108 #ifdef CONFIG_STACK_GROWSUP
109 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
111 return addr - (n * sizeof(long));
114 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
116 return addr + (n * sizeof(long));
120 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
123 unsigned long addr = user_stack_pointer(regs);
125 addr = adjust_stack_addr(addr, n);
127 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
134 * Uprobes-specific fetch functions
136 static nokprobe_inline int
137 probe_mem_read(void *dest, void *src, size_t size)
139 void __user *vaddr = (void __force __user *)src;
141 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
144 static nokprobe_inline int
145 probe_mem_read_user(void *dest, void *src, size_t size)
147 return probe_mem_read(dest, src, size);
151 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
152 * length and relative data location.
154 static nokprobe_inline int
155 fetch_store_string(unsigned long addr, void *dest, void *base)
158 u32 loc = *(u32 *)dest;
159 int maxlen = get_loc_len(loc);
160 u8 *dst = get_loc_data(dest, base);
161 void __user *src = (void __force __user *) addr;
163 if (unlikely(!maxlen))
166 if (addr == FETCH_TOKEN_COMM)
167 ret = strlcpy(dst, current->comm, maxlen);
169 ret = strncpy_from_user(dst, src, maxlen);
175 * Include the terminating null byte. In this case it
176 * was copied by strncpy_from_user but not accounted for in ret.
180 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
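/*
 * make_data_loc() packs the copied length together with the offset of the
 * string data (relative to the entry) into a single u32 "data location"
 * word; see trace_probe.h for the exact encoding.
 */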
186 static nokprobe_inline int
187 fetch_store_string_user(unsigned long addr, void *dest, void *base)
189 return fetch_store_string(addr, dest, base);
192 /* Return the length of the string, including the terminating null byte */
193 static nokprobe_inline int
194 fetch_store_strlen(unsigned long addr)
197 void __user *vaddr = (void __force __user *) addr;
199 if (addr == FETCH_TOKEN_COMM)
200 len = strlen(current->comm) + 1;
202 len = strnlen_user(vaddr, MAX_STRING_SIZE);
204 return (len > MAX_STRING_SIZE) ? 0 : len;
207 static nokprobe_inline int
208 fetch_store_strlen_user(unsigned long addr)
210 return fetch_store_strlen(addr);
213 static unsigned long translate_user_vaddr(unsigned long file_offset)
215 unsigned long base_addr;
216 struct uprobe_dispatch_data *udd;
218 udd = (void *) current->utask->vaddr;
220 base_addr = udd->bp_addr - udd->tu->offset;
221 return base_addr + file_offset;
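/*
 * The probe fired at bp_addr, which corresponds to tu->offset within the
 * probed file, so bp_addr - offset is the load base of the mapping and
 * base_addr + file_offset turns an @+OFFSET fetch argument into a runtime
 * virtual address.
 */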
224 /* Note that we don't verify it, since the code does not come from user space */
226 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
231 /* 1st stage: get value from context */
234 val = regs_get_register(regs, code->param);
237 val = get_user_stack_nth(regs, code->param);
239 case FETCH_OP_STACKP:
240 val = user_stack_pointer(regs);
242 case FETCH_OP_RETVAL:
243 val = regs_return_value(regs);
246 val = code->immediate;
249 val = FETCH_TOKEN_COMM;
252 val = (unsigned long)code->data;
255 val = translate_user_vaddr(code->immediate);
262 return process_fetch_insn_bottom(code, val, dest, base);
264 NOKPROBE_SYMBOL(process_fetch_insn)
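/*
 * Argument fetching is two-staged: the switch above resolves the raw value
 * from the probe context (register, user stack slot, return value,
 * immediate, comm, or file offset), then process_fetch_insn_bottom()
 * applies the remaining fetch ops (dereference, string copy, bitfields)
 * and stores the result into the buffer.
 */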
266 static struct trace_uprobe_filter *
267 trace_uprobe_get_filter(struct trace_uprobe *tu)
269 struct trace_probe_event *event = tu->tp.event;
271 return (struct trace_uprobe_filter *)&event->data[0];
274 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
276 rwlock_init(&filter->rwlock);
277 filter->nr_systemwide = 0;
278 INIT_LIST_HEAD(&filter->perf_events);
281 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
283 return !filter->nr_systemwide && list_empty(&filter->perf_events);
286 static inline bool is_ret_probe(struct trace_uprobe *tu)
288 return tu->consumer.ret_handler != NULL;
291 static bool trace_uprobe_is_busy(struct dyn_event *ev)
293 struct trace_uprobe *tu = to_trace_uprobe(ev);
295 return trace_probe_is_enabled(&tu->tp);
298 static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
299 int argc, const char **argv)
301 char buf[MAX_ARGSTR_LEN + 1];
307 len = strlen(tu->filename);
308 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
311 if (tu->ref_ctr_offset == 0)
312 snprintf(buf, sizeof(buf), "0x%0*lx",
313 (int)(sizeof(void *) * 2), tu->offset);
315 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
316 (int)(sizeof(void *) * 2), tu->offset,
318 if (strcmp(buf, &argv[0][len + 1]))
323 return trace_probe_match_command_args(&tu->tp, argc, argv);
326 static bool trace_uprobe_match(const char *system, const char *event,
327 int argc, const char **argv, struct dyn_event *ev)
329 struct trace_uprobe *tu = to_trace_uprobe(ev);
331 return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
332 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
333 trace_uprobe_match_command_head(tu, argc, argv);
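/*
 * Used when matching events for deletion: the event name and group must
 * match, and any extra arguments must start with the "PATH:OFFSET" (plus
 * optional "(REF_CTR_OFFSET)") head rebuilt above, followed by the same
 * probe arguments.
 */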
336 static nokprobe_inline struct trace_uprobe *
337 trace_uprobe_primary_from_call(struct trace_event_call *call)
339 struct trace_probe *tp;
341 tp = trace_probe_primary_from_call(call);
342 if (WARN_ON_ONCE(!tp))
345 return container_of(tp, struct trace_uprobe, tp);
349 * Allocate new trace_uprobe and initialize it (including uprobes).
351 static struct trace_uprobe *
352 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
354 struct trace_uprobe *tu;
357 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
359 return ERR_PTR(-ENOMEM);
361 ret = trace_probe_init(&tu->tp, event, group,
362 sizeof(struct trace_uprobe_filter));
366 dyn_event_init(&tu->devent, &trace_uprobe_ops);
367 tu->consumer.handler = uprobe_dispatcher;
369 tu->consumer.ret_handler = uretprobe_dispatcher;
370 init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
379 static void free_trace_uprobe(struct trace_uprobe *tu)
385 trace_probe_cleanup(&tu->tp);
390 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
392 struct dyn_event *pos;
393 struct trace_uprobe *tu;
395 for_each_trace_uprobe(tu, pos)
396 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
397 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
403 /* Unregister a trace_uprobe and probe_event */
404 static int unregister_trace_uprobe(struct trace_uprobe *tu)
408 if (trace_probe_has_sibling(&tu->tp))
411 ret = unregister_uprobe_event(tu);
416 dyn_event_remove(&tu->devent);
417 trace_probe_unlink(&tu->tp);
418 free_trace_uprobe(tu);
422 static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
423 struct trace_uprobe *comp)
425 struct trace_probe_event *tpe = orig->tp.event;
426 struct trace_probe *pos;
427 struct inode *comp_inode = d_real_inode(comp->path.dentry);
430 list_for_each_entry(pos, &tpe->probes, list) {
431 orig = container_of(pos, struct trace_uprobe, tp);
432 if (comp_inode != d_real_inode(orig->path.dentry) ||
433 comp->offset != orig->offset)
437 * trace_probe_compare_arg_type() ensured that nr_args and
438 * each argument name and type are same. Let's compare comm.
440 for (i = 0; i < orig->tp.nr_args; i++) {
441 if (strcmp(orig->tp.args[i].comm,
442 comp->tp.args[i].comm))
446 if (i == orig->tp.nr_args)
453 static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
457 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
459 /* Note that argument indices in the command start at 2 */
460 trace_probe_log_set_index(ret + 1);
461 trace_probe_log_err(0, DIFF_ARG_TYPE);
464 if (trace_uprobe_has_same_uprobe(to, tu)) {
465 trace_probe_log_set_index(0);
466 trace_probe_log_err(0, SAME_PROBE);
470 /* Append to existing event */
471 ret = trace_probe_append(&tu->tp, &to->tp);
473 dyn_event_add(&tu->devent);
479 * A uprobe with multiple reference counters is not allowed, i.e.
480 * if the inode and offset match, the reference counter offset *must*
481 * match as well. There is one exception: if the user is replacing an
482 * old trace_uprobe with a new one (same group/event), then the same
483 * uprobe with a new reference counter is allowed as long as the new
484 * one does not conflict with any other existing ones.
487 static int validate_ref_ctr_offset(struct trace_uprobe *new)
489 struct dyn_event *pos;
490 struct trace_uprobe *tmp;
491 struct inode *new_inode = d_real_inode(new->path.dentry);
493 for_each_trace_uprobe(tmp, pos) {
494 if (new_inode == d_real_inode(tmp->path.dentry) &&
495 new->offset == tmp->offset &&
496 new->ref_ctr_offset != tmp->ref_ctr_offset) {
497 pr_warn("Reference counter offset mismatch.");
504 /* Register a trace_uprobe and probe_event */
505 static int register_trace_uprobe(struct trace_uprobe *tu)
507 struct trace_uprobe *old_tu;
510 mutex_lock(&event_mutex);
512 ret = validate_ref_ctr_offset(tu);
516 /* register as an event */
517 old_tu = find_probe_event(trace_probe_name(&tu->tp),
518 trace_probe_group_name(&tu->tp));
520 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
521 trace_probe_log_set_index(0);
522 trace_probe_log_err(0, DIFF_PROBE_TYPE);
525 ret = append_trace_uprobe(tu, old_tu);
530 ret = register_uprobe_event(tu);
532 pr_warn("Failed to register probe event(%d)\n", ret);
536 dyn_event_add(&tu->devent);
539 mutex_unlock(&event_mutex);
546 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
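 * A usage sketch via tracefs (the address below is purely illustrative):
 *   echo 'p:mygrp/myevent /bin/bash:0x4245c0 arg0=$stack0' >> uprobe_events
 *   echo '-:mygrp/myevent' >> uprobe_events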
548 static int trace_uprobe_create(int argc, const char **argv)
550 struct trace_uprobe *tu;
551 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
552 char *arg, *filename, *rctr, *rctr_end, *tmp;
553 char buf[MAX_EVENT_NAME_LEN];
555 unsigned long offset, ref_ctr_offset;
556 bool is_return = false;
562 switch (argv[0][0]) {
575 if (argv[0][1] == ':')
578 if (!strchr(argv[1], '/'))
581 filename = kstrdup(argv[1], GFP_KERNEL);
585 /* Find the last occurrence, in case the path contains ':' too. */
586 arg = strrchr(filename, ':');
587 if (!arg || !isdigit(arg[1])) {
592 trace_probe_log_init("trace_uprobe", argc, argv);
593 trace_probe_log_set_index(1); /* filename is the 2nd argument */
596 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
598 trace_probe_log_err(0, FILE_NOT_FOUND);
600 trace_probe_log_clear();
603 if (!d_is_reg(path.dentry)) {
604 trace_probe_log_err(0, NO_REGULAR_FILE);
606 goto fail_address_parse;
609 /* Parse reference counter offset if specified. */
610 rctr = strchr(arg, '(');
612 rctr_end = strchr(rctr, ')');
615 rctr_end = rctr + strlen(rctr);
616 trace_probe_log_err(rctr_end - filename,
618 goto fail_address_parse;
619 } else if (rctr_end[1] != '\0') {
621 trace_probe_log_err(rctr_end + 1 - filename,
623 goto fail_address_parse;
628 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
630 trace_probe_log_err(rctr - filename, BAD_REFCNT);
631 goto fail_address_parse;
635 /* Parse uprobe offset. */
636 ret = kstrtoul(arg, 0, &offset);
638 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
639 goto fail_address_parse;
643 trace_probe_log_set_index(0);
645 ret = traceprobe_parse_event_name(&event, &group, buf,
648 goto fail_address_parse;
653 tail = kstrdup(kbasename(filename), GFP_KERNEL);
656 goto fail_address_parse;
659 ptr = strpbrk(tail, ".-_");
663 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
671 tu = alloc_trace_uprobe(group, event, argc, is_return);
674 /* This must return -ENOMEM otherwise there is a bug */
675 WARN_ON_ONCE(ret != -ENOMEM);
676 goto fail_address_parse;
679 tu->ref_ctr_offset = ref_ctr_offset;
681 tu->filename = filename;
683 /* parse arguments */
684 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
685 tmp = kstrdup(argv[i], GFP_KERNEL);
691 trace_probe_log_set_index(i + 2);
692 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
693 is_return ? TPARG_FL_RETURN : 0);
699 ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
703 ret = register_trace_uprobe(tu);
708 free_trace_uprobe(tu);
710 trace_probe_log_clear();
714 trace_probe_log_clear();
721 static int create_or_delete_trace_uprobe(int argc, char **argv)
725 if (argv[0][0] == '-')
726 return dyn_event_release(argc, argv, &trace_uprobe_ops);
728 ret = trace_uprobe_create(argc, (const char **)argv);
729 return ret == -ECANCELED ? -EINVAL : ret;
732 static int trace_uprobe_release(struct dyn_event *ev)
734 struct trace_uprobe *tu = to_trace_uprobe(ev);
736 return unregister_trace_uprobe(tu);
739 /* Probes listing interfaces */
740 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
742 struct trace_uprobe *tu = to_trace_uprobe(ev);
743 char c = is_ret_probe(tu) ? 'r' : 'p';
746 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
747 trace_probe_name(&tu->tp), tu->filename,
748 (int)(sizeof(void *) * 2), tu->offset);
750 if (tu->ref_ctr_offset)
751 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
753 for (i = 0; i < tu->tp.nr_args; i++)
754 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
760 static int probes_seq_show(struct seq_file *m, void *v)
762 struct dyn_event *ev = v;
764 if (!is_trace_uprobe(ev))
767 return trace_uprobe_show(m, ev);
770 static const struct seq_operations probes_seq_op = {
771 .start = dyn_event_seq_start,
772 .next = dyn_event_seq_next,
773 .stop = dyn_event_seq_stop,
774 .show = probes_seq_show
777 static int probes_open(struct inode *inode, struct file *file)
781 ret = security_locked_down(LOCKDOWN_TRACEFS);
785 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
786 ret = dyn_events_release_all(&trace_uprobe_ops);
791 return seq_open(file, &probes_seq_op);
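/*
 * Opening uprobe_events for writing with O_TRUNC (e.g. "echo > uprobe_events")
 * first releases every uprobe event that is not currently in use.
 */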
794 static ssize_t probes_write(struct file *file, const char __user *buffer,
795 size_t count, loff_t *ppos)
797 return trace_parse_run_command(file, buffer, count, ppos,
798 create_or_delete_trace_uprobe);
801 static const struct file_operations uprobe_events_ops = {
802 .owner = THIS_MODULE,
806 .release = seq_release,
807 .write = probes_write,
810 /* Probes profiling interfaces */
811 static int probes_profile_seq_show(struct seq_file *m, void *v)
813 struct dyn_event *ev = v;
814 struct trace_uprobe *tu;
816 if (!is_trace_uprobe(ev))
819 tu = to_trace_uprobe(ev);
820 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
821 trace_probe_name(&tu->tp), tu->nhit);
825 static const struct seq_operations profile_seq_op = {
826 .start = dyn_event_seq_start,
827 .next = dyn_event_seq_next,
828 .stop = dyn_event_seq_stop,
829 .show = probes_profile_seq_show
832 static int profile_open(struct inode *inode, struct file *file)
836 ret = security_locked_down(LOCKDOWN_TRACEFS);
840 return seq_open(file, &profile_seq_op);
843 static const struct file_operations uprobe_profile_ops = {
844 .owner = THIS_MODULE,
845 .open = profile_open,
848 .release = seq_release,
851 struct uprobe_cpu_buffer {
852 struct mutex mutex;
853 void *buf;
854 };
855 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
856 static int uprobe_buffer_refcnt;
858 static int uprobe_buffer_init(void)
862 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
863 if (uprobe_cpu_buffer == NULL)
866 for_each_possible_cpu(cpu) {
867 struct page *p = alloc_pages_node(cpu_to_node(cpu),
873 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
874 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
880 for_each_possible_cpu(cpu) {
883 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
886 free_percpu(uprobe_cpu_buffer);
890 static int uprobe_buffer_enable(void)
894 BUG_ON(!mutex_is_locked(&event_mutex));
896 if (uprobe_buffer_refcnt++ == 0) {
897 ret = uprobe_buffer_init();
899 uprobe_buffer_refcnt--;
905 static void uprobe_buffer_disable(void)
909 BUG_ON(!mutex_is_locked(&event_mutex));
911 if (--uprobe_buffer_refcnt == 0) {
912 for_each_possible_cpu(cpu)
913 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
916 free_percpu(uprobe_cpu_buffer);
917 uprobe_cpu_buffer = NULL;
921 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
923 struct uprobe_cpu_buffer *ucb;
926 cpu = raw_smp_processor_id();
927 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
930 * Use per-cpu buffers for fastest access, but we might migrate
931 * so the mutex makes sure we have sole access to it.
933 mutex_lock(&ucb->mutex);
938 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
940 mutex_unlock(&ucb->mutex);
943 static void __uprobe_trace_func(struct trace_uprobe *tu,
944 unsigned long func, struct pt_regs *regs,
945 struct uprobe_cpu_buffer *ucb, int dsize,
946 struct trace_event_file *trace_file)
948 struct uprobe_trace_entry_head *entry;
949 struct ring_buffer_event *event;
950 struct ring_buffer *buffer;
953 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
955 WARN_ON(call != trace_file->event_call);
957 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
960 if (trace_trigger_soft_disabled(trace_file))
963 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
964 size = esize + tu->tp.size + dsize;
965 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
966 call->event.type, size, 0, 0);
970 entry = ring_buffer_event_data(event);
971 if (is_ret_probe(tu)) {
972 entry->vaddr[0] = func;
973 entry->vaddr[1] = instruction_pointer(regs);
974 data = DATAOF_TRACE_ENTRY(entry, true);
976 entry->vaddr[0] = instruction_pointer(regs);
977 data = DATAOF_TRACE_ENTRY(entry, false);
980 memcpy(data, ucb->buf, tu->tp.size + dsize);
982 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
986 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
987 struct uprobe_cpu_buffer *ucb, int dsize)
989 struct event_file_link *link;
991 if (is_ret_probe(tu))
995 trace_probe_for_each_link_rcu(link, &tu->tp)
996 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
1002 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1003 struct pt_regs *regs,
1004 struct uprobe_cpu_buffer *ucb, int dsize)
1006 struct event_file_link *link;
1009 trace_probe_for_each_link_rcu(link, &tu->tp)
1010 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1014 /* Event entry printers */
1015 static enum print_line_t
1016 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1018 struct uprobe_trace_entry_head *entry;
1019 struct trace_seq *s = &iter->seq;
1020 struct trace_uprobe *tu;
1023 entry = (struct uprobe_trace_entry_head *)iter->ent;
1024 tu = trace_uprobe_primary_from_call(
1025 container_of(event, struct trace_event_call, event));
1029 if (is_ret_probe(tu)) {
1030 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1031 trace_probe_name(&tu->tp),
1032 entry->vaddr[1], entry->vaddr[0]);
1033 data = DATAOF_TRACE_ENTRY(entry, true);
1035 trace_seq_printf(s, "%s: (0x%lx)",
1036 trace_probe_name(&tu->tp),
1038 data = DATAOF_TRACE_ENTRY(entry, false);
1041 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1044 trace_seq_putc(s, '\n');
1047 return trace_handle_return(s);
1050 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1051 enum uprobe_filter_ctx ctx,
1052 struct mm_struct *mm);
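/*
 * The perf registration path passes uprobe_perf_filter() here so
 * breakpoints are only installed into mms that actually have an attached
 * perf event; the ftrace path passes a NULL filter and probes every task.
 */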
1054 static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1058 tu->consumer.filter = filter;
1059 tu->inode = d_real_inode(tu->path.dentry);
1061 if (tu->ref_ctr_offset)
1062 ret = uprobe_register_refctr(tu->inode, tu->offset,
1063 tu->ref_ctr_offset, &tu->consumer);
1065 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1073 static void __probe_event_disable(struct trace_probe *tp)
1075 struct trace_probe *pos;
1076 struct trace_uprobe *tu;
1078 tu = container_of(tp, struct trace_uprobe, tp);
1079 WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
1081 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1082 tu = container_of(pos, struct trace_uprobe, tp);
1086 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1091 static int probe_event_enable(struct trace_event_call *call,
1092 struct trace_event_file *file, filter_func_t filter)
1094 struct trace_probe *pos, *tp;
1095 struct trace_uprobe *tu;
1099 tp = trace_probe_primary_from_call(call);
1100 if (WARN_ON_ONCE(!tp))
1102 enabled = trace_probe_is_enabled(tp);
1104 /* This may also change "enabled" state */
1106 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1109 ret = trace_probe_add_file(tp, file);
1113 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1116 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1119 tu = container_of(tp, struct trace_uprobe, tp);
1120 WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
1125 ret = uprobe_buffer_enable();
1129 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1130 tu = container_of(pos, struct trace_uprobe, tp);
1131 ret = trace_uprobe_enable(tu, filter);
1133 __probe_event_disable(tp);
1141 uprobe_buffer_disable();
1145 trace_probe_remove_file(tp, file);
1147 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1152 static void probe_event_disable(struct trace_event_call *call,
1153 struct trace_event_file *file)
1155 struct trace_probe *tp;
1157 tp = trace_probe_primary_from_call(call);
1158 if (WARN_ON_ONCE(!tp))
1161 if (!trace_probe_is_enabled(tp))
1165 if (trace_probe_remove_file(tp, file) < 0)
1168 if (trace_probe_is_enabled(tp))
1171 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1173 __probe_event_disable(tp);
1174 uprobe_buffer_disable();
1177 static int uprobe_event_define_fields(struct trace_event_call *event_call)
1180 struct uprobe_trace_entry_head field;
1181 struct trace_uprobe *tu;
1183 tu = trace_uprobe_primary_from_call(event_call);
1187 if (is_ret_probe(tu)) {
1188 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1189 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1190 size = SIZEOF_TRACE_ENTRY(true);
1192 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1193 size = SIZEOF_TRACE_ENTRY(false);
1196 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1199 #ifdef CONFIG_PERF_EVENTS
1201 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1203 struct perf_event *event;
1205 if (filter->nr_systemwide)
1208 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1209 if (event->hw.target->mm == mm)
1217 trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1218 struct perf_event *event)
1220 return __uprobe_perf_filter(filter, event->hw.target->mm);
1223 static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1224 struct perf_event *event)
1228 write_lock(&filter->rwlock);
1229 if (event->hw.target) {
1230 list_del(&event->hw.tp_list);
1231 done = filter->nr_systemwide ||
1232 (event->hw.target->flags & PF_EXITING) ||
1233 trace_uprobe_filter_event(filter, event);
1235 filter->nr_systemwide--;
1236 done = filter->nr_systemwide;
1238 write_unlock(&filter->rwlock);
1243 /* This returns true if the filter always covers target mm */
1244 static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1245 struct perf_event *event)
1249 write_lock(&filter->rwlock);
1250 if (event->hw.target) {
1252 * event->parent != NULL means copy_process(), we can avoid
1253 * uprobe_apply(). current->mm must be probed and we can rely
1254 * on dup_mmap() which preserves the already installed bp's.
1256 * attr.enable_on_exec means that exec/mmap will install the
1257 * breakpoints we need.
1259 done = filter->nr_systemwide ||
1260 event->parent || event->attr.enable_on_exec ||
1261 trace_uprobe_filter_event(filter, event);
1262 list_add(&event->hw.tp_list, &filter->perf_events);
1264 done = filter->nr_systemwide;
1265 filter->nr_systemwide++;
1267 write_unlock(&filter->rwlock);
1272 static int uprobe_perf_close(struct trace_event_call *call,
1273 struct perf_event *event)
1275 struct trace_probe *pos, *tp;
1276 struct trace_uprobe *tu;
1279 tp = trace_probe_primary_from_call(call);
1280 if (WARN_ON_ONCE(!tp))
1283 tu = container_of(tp, struct trace_uprobe, tp);
1284 if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
1287 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1288 tu = container_of(pos, struct trace_uprobe, tp);
1289 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1297 static int uprobe_perf_open(struct trace_event_call *call,
1298 struct perf_event *event)
1300 struct trace_probe *pos, *tp;
1301 struct trace_uprobe *tu;
1304 tp = trace_probe_primary_from_call(call);
1305 if (WARN_ON_ONCE(!tp))
1308 tu = container_of(tp, struct trace_uprobe, tp);
1309 if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
1312 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1313 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1315 uprobe_perf_close(call, event);
1323 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1324 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1326 struct trace_uprobe_filter *filter;
1327 struct trace_uprobe *tu;
1330 tu = container_of(uc, struct trace_uprobe, consumer);
1331 filter = trace_uprobe_get_filter(tu);
1333 read_lock(&filter->rwlock);
1334 ret = __uprobe_perf_filter(filter, mm);
1335 read_unlock(&filter->rwlock);
1340 static void __uprobe_perf_func(struct trace_uprobe *tu,
1341 unsigned long func, struct pt_regs *regs,
1342 struct uprobe_cpu_buffer *ucb, int dsize)
1344 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1345 struct uprobe_trace_entry_head *entry;
1346 struct hlist_head *head;
1351 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1354 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1356 size = esize + tu->tp.size + dsize;
1357 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1358 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1362 head = this_cpu_ptr(call->perf_events);
1363 if (hlist_empty(head))
1366 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1370 if (is_ret_probe(tu)) {
1371 entry->vaddr[0] = func;
1372 entry->vaddr[1] = instruction_pointer(regs);
1373 data = DATAOF_TRACE_ENTRY(entry, true);
1375 entry->vaddr[0] = instruction_pointer(regs);
1376 data = DATAOF_TRACE_ENTRY(entry, false);
1379 memcpy(data, ucb->buf, tu->tp.size + dsize);
1381 if (size - esize > tu->tp.size + dsize) {
1382 int len = tu->tp.size + dsize;
1384 memset(data + len, 0, size - esize - len);
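/*
 * The memset above clears the alignment padding between the copied
 * argument data and the u64-aligned record size, so no stale buffer
 * contents are handed to userspace through the perf ring buffer.
 */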
1387 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1393 /* uprobe profile handler */
1394 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1395 struct uprobe_cpu_buffer *ucb, int dsize)
1397 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1398 return UPROBE_HANDLER_REMOVE;
1400 if (!is_ret_probe(tu))
1401 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1405 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1406 struct pt_regs *regs,
1407 struct uprobe_cpu_buffer *ucb, int dsize)
1409 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1412 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1413 const char **filename, u64 *probe_offset,
1414 bool perf_type_tracepoint)
1416 const char *pevent = trace_event_name(event->tp_event);
1417 const char *group = event->tp_event->class->system;
1418 struct trace_uprobe *tu;
1420 if (perf_type_tracepoint)
1421 tu = find_probe_event(pevent, group);
1423 tu = event->tp_event->data;
1427 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1428 : BPF_FD_TYPE_UPROBE;
1429 *filename = tu->filename;
1430 *probe_offset = tu->offset;
1433 #endif /* CONFIG_PERF_EVENTS */
1436 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1439 struct trace_event_file *file = data;
1442 case TRACE_REG_REGISTER:
1443 return probe_event_enable(event, file, NULL);
1445 case TRACE_REG_UNREGISTER:
1446 probe_event_disable(event, file);
1449 #ifdef CONFIG_PERF_EVENTS
1450 case TRACE_REG_PERF_REGISTER:
1451 return probe_event_enable(event, NULL, uprobe_perf_filter);
1453 case TRACE_REG_PERF_UNREGISTER:
1454 probe_event_disable(event, NULL);
1457 case TRACE_REG_PERF_OPEN:
1458 return uprobe_perf_open(event, data);
1460 case TRACE_REG_PERF_CLOSE:
1461 return uprobe_perf_close(event, data);
1470 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1472 struct trace_uprobe *tu;
1473 struct uprobe_dispatch_data udd;
1474 struct uprobe_cpu_buffer *ucb;
1479 tu = container_of(con, struct trace_uprobe, consumer);
1483 udd.bp_addr = instruction_pointer(regs);
1485 current->utask->vaddr = (unsigned long) &udd;
1487 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1490 dsize = __get_data_size(&tu->tp, regs);
1491 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1493 ucb = uprobe_buffer_get();
1494 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1496 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1497 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1499 #ifdef CONFIG_PERF_EVENTS
1500 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1501 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1503 uprobe_buffer_put(ucb);
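/*
 * The handler results are OR-ed together; if the perf path returned
 * UPROBE_HANDLER_REMOVE (its filter no longer matches current->mm), the
 * uprobes core removes the breakpoint from this process.
 */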
1507 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1508 unsigned long func, struct pt_regs *regs)
1510 struct trace_uprobe *tu;
1511 struct uprobe_dispatch_data udd;
1512 struct uprobe_cpu_buffer *ucb;
1515 tu = container_of(con, struct trace_uprobe, consumer);
1520 current->utask->vaddr = (unsigned long) &udd;
1522 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1525 dsize = __get_data_size(&tu->tp, regs);
1526 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1528 ucb = uprobe_buffer_get();
1529 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1531 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1532 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1534 #ifdef CONFIG_PERF_EVENTS
1535 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1536 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1538 uprobe_buffer_put(ucb);
1542 static struct trace_event_functions uprobe_funcs = {
1543 .trace = print_uprobe_event
1546 static inline void init_trace_event_call(struct trace_uprobe *tu)
1548 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1550 call->event.funcs = &uprobe_funcs;
1551 call->class->define_fields = uprobe_event_define_fields;
1553 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1554 call->class->reg = trace_uprobe_register;
1557 static int register_uprobe_event(struct trace_uprobe *tu)
1559 init_trace_event_call(tu);
1561 return trace_probe_register_event_call(&tu->tp);
1564 static int unregister_uprobe_event(struct trace_uprobe *tu)
1566 return trace_probe_unregister_event_call(&tu->tp);
1569 #ifdef CONFIG_PERF_EVENTS
1570 struct trace_event_call *
1571 create_local_trace_uprobe(char *name, unsigned long offs,
1572 unsigned long ref_ctr_offset, bool is_return)
1574 struct trace_uprobe *tu;
1578 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1580 return ERR_PTR(ret);
1582 if (!d_is_reg(path.dentry)) {
1584 return ERR_PTR(-EINVAL);
1588 * local trace_uprobes are not added to dyn_event, so they are never
1589 * searched in find_probe_event(). Therefore, there is no concern of
1590 * duplicated name "DUMMY_EVENT" here.
1592 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1596 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1599 return ERR_CAST(tu);
1604 tu->ref_ctr_offset = ref_ctr_offset;
1605 tu->filename = kstrdup(name, GFP_KERNEL);
1606 init_trace_event_call(tu);
1608 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1613 return trace_probe_event_call(&tu->tp);
1615 free_trace_uprobe(tu);
1616 return ERR_PTR(ret);
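/*
 * Local events created here are presumably attached through the
 * perf_event_open() path rather than through tracefs, which is why they
 * get the fixed "DUMMY_EVENT" name and are never added to dyn_event.
 */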
1619 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1621 struct trace_uprobe *tu;
1623 tu = trace_uprobe_primary_from_call(event_call);
1625 free_trace_uprobe(tu);
1627 #endif /* CONFIG_PERF_EVENTS */
1629 /* Make a trace interface for controlling probe points */
1630 static __init int init_uprobe_trace(void)
1632 struct dentry *d_tracer;
1635 ret = dyn_event_register(&trace_uprobe_ops);
1639 d_tracer = tracing_init_dentry();
1640 if (IS_ERR(d_tracer))
1643 trace_create_file("uprobe_events", 0644, d_tracer,
1644 NULL, &uprobe_events_ops);
1645 /* Profile interface */
1646 trace_create_file("uprobe_profile", 0444, d_tracer,
1647 NULL, &uprobe_profile_ops);
1651 fs_initcall(init_uprobe_trace);