kernel/trace/trace_uprobe.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * uprobes-based tracing events
4  *
5  * Copyright (C) IBM Corporation, 2010-2012
6  * Author:      Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7  */
8 #define pr_fmt(fmt)     "trace_uprobe: " fmt
9
10 #include <linux/ctype.h>
11 #include <linux/module.h>
12 #include <linux/uaccess.h>
13 #include <linux/uprobes.h>
14 #include <linux/namei.h>
15 #include <linux/string.h>
16 #include <linux/rculist.h>
17
18 #include "trace_dynevent.h"
19 #include "trace_probe.h"
20 #include "trace_probe_tmpl.h"
21
22 #define UPROBE_EVENT_SYSTEM     "uprobes"
23
24 struct uprobe_trace_entry_head {
25         struct trace_entry      ent;
26         unsigned long           vaddr[];
27 };
28
29 #define SIZEOF_TRACE_ENTRY(is_return)                   \
30         (sizeof(struct uprobe_trace_entry_head) +       \
31          sizeof(unsigned long) * (is_return ? 2 : 1))
32
33 #define DATAOF_TRACE_ENTRY(entry, is_return)            \
34         ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
35
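/*
 * Note on the record layout (added for clarity): every sample starts with
 * the common trace_entry header followed by one vaddr slot holding the
 * instruction pointer for an entry probe, or two slots holding the probed
 * function address and the return address for a return probe. The macros
 * above compute the header size and the start of the fetched argument data
 * accordingly.
 */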
36 struct trace_uprobe_filter {
37         rwlock_t                rwlock;
38         int                     nr_systemwide;
39         struct list_head        perf_events;
40 };
41
42 static int trace_uprobe_create(int argc, const char **argv);
43 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
44 static int trace_uprobe_release(struct dyn_event *ev);
45 static bool trace_uprobe_is_busy(struct dyn_event *ev);
46 static bool trace_uprobe_match(const char *system, const char *event,
47                                struct dyn_event *ev);
48
49 static struct dyn_event_operations trace_uprobe_ops = {
50         .create = trace_uprobe_create,
51         .show = trace_uprobe_show,
52         .is_busy = trace_uprobe_is_busy,
53         .free = trace_uprobe_release,
54         .match = trace_uprobe_match,
55 };
56
57 /*
58  * uprobe event core functions
59  */
60 struct trace_uprobe {
61         struct dyn_event                devent;
62         struct trace_uprobe_filter      filter;
63         struct uprobe_consumer          consumer;
64         struct path                     path;
65         struct inode                    *inode;
66         char                            *filename;
67         unsigned long                   offset;
68         unsigned long                   ref_ctr_offset;
69         unsigned long                   nhit;
70         struct trace_probe              tp;
71 };
72
73 static bool is_trace_uprobe(struct dyn_event *ev)
74 {
75         return ev->ops == &trace_uprobe_ops;
76 }
77
78 static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
79 {
80         return container_of(ev, struct trace_uprobe, devent);
81 }
82
83 /**
84  * for_each_trace_uprobe - iterate over the trace_uprobe list
85  * @pos:        the struct trace_uprobe * for each entry
86  * @dpos:       the struct dyn_event * to use as a loop cursor
87  */
88 #define for_each_trace_uprobe(pos, dpos)        \
89         for_each_dyn_event(dpos)                \
90                 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
91
92 #define SIZEOF_TRACE_UPROBE(n)                          \
93         (offsetof(struct trace_uprobe, tp.args) +       \
94         (sizeof(struct probe_arg) * (n)))
95
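/*
 * SIZEOF_TRACE_UPROBE() above accounts for the flexible probe_arg array at
 * the end of the embedded trace_probe, so the allocation grows with the
 * number of fetch arguments (see alloc_trace_uprobe()).
 */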
96 static int register_uprobe_event(struct trace_uprobe *tu);
97 static int unregister_uprobe_event(struct trace_uprobe *tu);
98
99 struct uprobe_dispatch_data {
100         struct trace_uprobe     *tu;
101         unsigned long           bp_addr;
102 };
103
104 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
105 static int uretprobe_dispatcher(struct uprobe_consumer *con,
106                                 unsigned long func, struct pt_regs *regs);
107
108 #ifdef CONFIG_STACK_GROWSUP
109 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
110 {
111         return addr - (n * sizeof(long));
112 }
113 #else
114 static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
115 {
116         return addr + (n * sizeof(long));
117 }
118 #endif
119
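/*
 * get_user_stack_nth() reads the n-th word from the user stack, stepping
 * away from the stack pointer in the direction the stack grows
 * (adjust_stack_addr() hides the CONFIG_STACK_GROWSUP difference). A
 * faulting access is reported as 0 rather than as an error.
 */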
120 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
121 {
122         unsigned long ret;
123         unsigned long addr = user_stack_pointer(regs);
124
125         addr = adjust_stack_addr(addr, n);
126
127         if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
128                 return 0;
129
130         return ret;
131 }
132
133 /*
134  * Uprobes-specific fetch functions
135  */
136 static nokprobe_inline int
137 probe_mem_read(void *dest, void *src, size_t size)
138 {
139         void __user *vaddr = (void __force __user *)src;
140
141         return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
142 }
143 /*
144  * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
145  * length and relative data location.
146  */
147 static nokprobe_inline int
148 fetch_store_string(unsigned long addr, void *dest, void *base)
149 {
150         long ret;
151         u32 loc = *(u32 *)dest;
152         int maxlen  = get_loc_len(loc);
153         u8 *dst = get_loc_data(dest, base);
154         void __user *src = (void __force __user *) addr;
155
156         if (unlikely(!maxlen))
157                 return -ENOMEM;
158
159         ret = strncpy_from_user(dst, src, maxlen);
160         if (ret >= 0) {
161                 if (ret == maxlen)
162                         dst[ret - 1] = '\0';
163                 else
164                         /*
165                          * Include the terminating null byte. In this case it
166                          * was copied by strncpy_from_user but not accounted
167                          * for in ret.
168                          */
169                         ret++;
170                 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
171         }
172
173         return ret;
174 }
175
176 /* Return the length of the string, including the terminating null byte */
177 static nokprobe_inline int
178 fetch_store_strlen(unsigned long addr)
179 {
180         int len;
181         void __user *vaddr = (void __force __user *) addr;
182
183         len = strnlen_user(vaddr, MAX_STRING_SIZE);
184
185         return (len > MAX_STRING_SIZE) ? 0 : len;
186 }
187
188 static unsigned long translate_user_vaddr(unsigned long file_offset)
189 {
190         unsigned long base_addr;
191         struct uprobe_dispatch_data *udd;
192
193         udd = (void *) current->utask->vaddr;
194
195         base_addr = udd->bp_addr - udd->tu->offset;
196         return base_addr + file_offset;
197 }
198
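/*
 * FETCH_OP_FOFFS arguments are given as offsets within the probed file.
 * At probe hit time the breakpoint address minus the probe's file offset
 * yields the load base of the mapping, so base + file_offset is the
 * virtual address to dereference. The dispatch data is stashed in
 * current->utask->vaddr by the dispatchers below.
 */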
199 /* Note that we don't verify the fetch code, since it does not come from user space */
200 static int
201 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
202                    void *base)
203 {
204         unsigned long val;
205
206         /* 1st stage: get value from context */
207         switch (code->op) {
208         case FETCH_OP_REG:
209                 val = regs_get_register(regs, code->param);
210                 break;
211         case FETCH_OP_STACK:
212                 val = get_user_stack_nth(regs, code->param);
213                 break;
214         case FETCH_OP_STACKP:
215                 val = user_stack_pointer(regs);
216                 break;
217         case FETCH_OP_RETVAL:
218                 val = regs_return_value(regs);
219                 break;
220         case FETCH_OP_IMM:
221                 val = code->immediate;
222                 break;
223         case FETCH_OP_FOFFS:
224                 val = translate_user_vaddr(code->immediate);
225                 break;
226         default:
227                 return -EILSEQ;
228         }
229         code++;
230
231         return process_fetch_insn_bottom(code, val, dest, base);
232 }
233 NOKPROBE_SYMBOL(process_fetch_insn)
234
235 static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
236 {
237         rwlock_init(&filter->rwlock);
238         filter->nr_systemwide = 0;
239         INIT_LIST_HEAD(&filter->perf_events);
240 }
241
242 static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
243 {
244         return !filter->nr_systemwide && list_empty(&filter->perf_events);
245 }
246
247 static inline bool is_ret_probe(struct trace_uprobe *tu)
248 {
249         return tu->consumer.ret_handler != NULL;
250 }
251
252 static bool trace_uprobe_is_busy(struct dyn_event *ev)
253 {
254         struct trace_uprobe *tu = to_trace_uprobe(ev);
255
256         return trace_probe_is_enabled(&tu->tp);
257 }
258
259 static bool trace_uprobe_match(const char *system, const char *event,
260                                struct dyn_event *ev)
261 {
262         struct trace_uprobe *tu = to_trace_uprobe(ev);
263
264         return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
265                 (!system || strcmp(tu->tp.call.class->system, system) == 0);
266 }
267
268 /*
269  * Allocate a new trace_uprobe and initialize it (including uprobes).
270  */
271 static struct trace_uprobe *
272 alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
273 {
274         struct trace_uprobe *tu;
275
276         if (!event || !is_good_name(event))
277                 return ERR_PTR(-EINVAL);
278
279         if (!group || !is_good_name(group))
280                 return ERR_PTR(-EINVAL);
281
282         tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
283         if (!tu)
284                 return ERR_PTR(-ENOMEM);
285
286         tu->tp.call.class = &tu->tp.class;
287         tu->tp.call.name = kstrdup(event, GFP_KERNEL);
288         if (!tu->tp.call.name)
289                 goto error;
290
291         tu->tp.class.system = kstrdup(group, GFP_KERNEL);
292         if (!tu->tp.class.system)
293                 goto error;
294
295         dyn_event_init(&tu->devent, &trace_uprobe_ops);
296         INIT_LIST_HEAD(&tu->tp.files);
297         tu->consumer.handler = uprobe_dispatcher;
298         if (is_ret)
299                 tu->consumer.ret_handler = uretprobe_dispatcher;
300         init_trace_uprobe_filter(&tu->filter);
301         return tu;
302
303 error:
304         kfree(tu->tp.call.name);
305         kfree(tu);
306
307         return ERR_PTR(-ENOMEM);
308 }
309
310 static void free_trace_uprobe(struct trace_uprobe *tu)
311 {
312         int i;
313
314         if (!tu)
315                 return;
316
317         for (i = 0; i < tu->tp.nr_args; i++)
318                 traceprobe_free_probe_arg(&tu->tp.args[i]);
319
320         path_put(&tu->path);
321         kfree(tu->tp.call.class->system);
322         kfree(tu->tp.call.name);
323         kfree(tu->filename);
324         kfree(tu);
325 }
326
327 static struct trace_uprobe *find_probe_event(const char *event, const char *group)
328 {
329         struct dyn_event *pos;
330         struct trace_uprobe *tu;
331
332         for_each_trace_uprobe(tu, pos)
333                 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
334                     strcmp(tu->tp.call.class->system, group) == 0)
335                         return tu;
336
337         return NULL;
338 }
339
340 /* Unregister a trace_uprobe and probe_event */
341 static int unregister_trace_uprobe(struct trace_uprobe *tu)
342 {
343         int ret;
344
345         ret = unregister_uprobe_event(tu);
346         if (ret)
347                 return ret;
348
349         dyn_event_remove(&tu->devent);
350         free_trace_uprobe(tu);
351         return 0;
352 }
353
354 /*
355  * A uprobe with multiple reference counters is not allowed; i.e. if
356  * the inode and offset match, the reference counter offset *must*
357  * match as well. There is one exception: if the user is replacing an
358  * old trace_uprobe with a new one (same group/event), then the same
359  * uprobe is allowed with a new reference counter, as long as the new
360  * one does not conflict with any other
361  * existing ones.
362  */
363 static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
364 {
365         struct dyn_event *pos;
366         struct trace_uprobe *tmp, *old = NULL;
367         struct inode *new_inode = d_real_inode(new->path.dentry);
368
369         old = find_probe_event(trace_event_name(&new->tp.call),
370                                 new->tp.call.class->system);
371
372         for_each_trace_uprobe(tmp, pos) {
373                 if ((old ? old != tmp : true) &&
374                     new_inode == d_real_inode(tmp->path.dentry) &&
375                     new->offset == tmp->offset &&
376                     new->ref_ctr_offset != tmp->ref_ctr_offset) {
377                         pr_warn("Reference counter offset mismatch.");
378                         return ERR_PTR(-EINVAL);
379                 }
380         }
381         return old;
382 }
383
384 /* Register a trace_uprobe and probe_event */
385 static int register_trace_uprobe(struct trace_uprobe *tu)
386 {
387         struct trace_uprobe *old_tu;
388         int ret;
389
390         mutex_lock(&event_mutex);
391
392         /* register as an event */
393         old_tu = find_old_trace_uprobe(tu);
394         if (IS_ERR(old_tu)) {
395                 ret = PTR_ERR(old_tu);
396                 goto end;
397         }
398
399         if (old_tu) {
400                 /* delete old event */
401                 ret = unregister_trace_uprobe(old_tu);
402                 if (ret)
403                         goto end;
404         }
405
406         ret = register_uprobe_event(tu);
407         if (ret) {
408                 pr_warn("Failed to register probe event(%d)\n", ret);
409                 goto end;
410         }
411
412         dyn_event_add(&tu->devent);
413
414 end:
415         mutex_unlock(&event_mutex);
416
417         return ret;
418 }
419
420 /*
421  * Argument syntax:
422  *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[(REF_CTR_OFFSET)] [FETCHARGS]
423  *
424  *  - Remove uprobe: -:[GRP/]EVENT
425  */
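/*
 * Illustrative examples (the paths, offsets and register names are made
 * up; see Documentation/trace/uprobetracer.rst for the full syntax):
 *
 *   echo 'p:my_ev /bin/bash:0x4245c0 %ip %ax' >> uprobe_events
 *   echo 'r:my_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo 'p /lib/libfoo.so:0x100(0x200)' >> uprobe_events
 *   echo '-:my_ev' >> uprobe_events
 *
 * Opening the file for writing with O_TRUNC ('>' instead of '>>') first
 * removes all existing uprobe events (see probes_open()).
 */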
426 static int trace_uprobe_create(int argc, const char **argv)
427 {
428         struct trace_uprobe *tu;
429         const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
430         char *arg, *filename, *rctr, *rctr_end, *tmp;
431         char buf[MAX_EVENT_NAME_LEN];
432         struct path path;
433         unsigned long offset, ref_ctr_offset;
434         bool is_return = false;
435         int i, ret;
436
437         ret = 0;
438         ref_ctr_offset = 0;
439
440         /* argc must be >= 1 */
441         if (argv[0][0] == 'r')
442                 is_return = true;
443         else if (argv[0][0] != 'p' || argc < 2)
444                 return -ECANCELED;
445
446         if (argv[0][1] == ':')
447                 event = &argv[0][2];
448
449         if (!strchr(argv[1], '/'))
450                 return -ECANCELED;
451
452         filename = kstrdup(argv[1], GFP_KERNEL);
453         if (!filename)
454                 return -ENOMEM;
455
456         /* Find the last occurrence, in case the path contains ':' too. */
457         arg = strrchr(filename, ':');
458         if (!arg || !isdigit(arg[1])) {
459                 kfree(filename);
460                 return -ECANCELED;
461         }
462
463         *arg++ = '\0';
464         ret = kern_path(filename, LOOKUP_FOLLOW, &path);
465         if (ret) {
466                 kfree(filename);
467                 return ret;
468         }
469         if (!d_is_reg(path.dentry)) {
470                 ret = -EINVAL;
471                 goto fail_address_parse;
472         }
473
474         /* Parse reference counter offset if specified. */
475         rctr = strchr(arg, '(');
476         if (rctr) {
477                 rctr_end = strchr(rctr, ')');
478                 if (rctr > rctr_end || *(rctr_end + 1) != 0) {
479                         ret = -EINVAL;
480                         pr_info("Invalid reference counter offset.\n");
481                         goto fail_address_parse;
482                 }
483
484                 *rctr++ = '\0';
485                 *rctr_end = '\0';
486                 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
487                 if (ret) {
488                         pr_info("Invalid reference counter offset.\n");
489                         goto fail_address_parse;
490                 }
491         }
492
493         /* Parse uprobe offset. */
494         ret = kstrtoul(arg, 0, &offset);
495         if (ret)
496                 goto fail_address_parse;
497
498         argc -= 2;
499         argv += 2;
500
501         /* setup a probe */
502         if (event) {
503                 ret = traceprobe_parse_event_name(&event, &group, buf);
504                 if (ret)
505                         goto fail_address_parse;
506         } else {
507                 char *tail;
508                 char *ptr;
509
510                 tail = kstrdup(kbasename(filename), GFP_KERNEL);
511                 if (!tail) {
512                         ret = -ENOMEM;
513                         goto fail_address_parse;
514                 }
515
516                 ptr = strpbrk(tail, ".-_");
517                 if (ptr)
518                         *ptr = '\0';
519
520                 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
521                 event = buf;
522                 kfree(tail);
523         }
524
525         tu = alloc_trace_uprobe(group, event, argc, is_return);
526         if (IS_ERR(tu)) {
527                 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
528                 ret = PTR_ERR(tu);
529                 goto fail_address_parse;
530         }
531         tu->offset = offset;
532         tu->ref_ctr_offset = ref_ctr_offset;
533         tu->path = path;
534         tu->filename = filename;
535
536         /* parse arguments */
537         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
538                 tmp = kstrdup(argv[i], GFP_KERNEL);
539                 if (!tmp) {
540                         ret = -ENOMEM;
541                         goto error;
542                 }
543
544                 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
545                                         is_return ? TPARG_FL_RETURN : 0);
546                 kfree(tmp);
547                 if (ret)
548                         goto error;
549         }
550
551         ret = register_trace_uprobe(tu);
552         if (ret)
553                 goto error;
554         return 0;
555
556 error:
557         free_trace_uprobe(tu);
558         return ret;
559
560 fail_address_parse:
561         path_put(&path);
562         kfree(filename);
563
564         pr_info("Failed to parse address or file.\n");
565
566         return ret;
567 }
568
569 static int create_or_delete_trace_uprobe(int argc, char **argv)
570 {
571         int ret;
572
573         if (argv[0][0] == '-')
574                 return dyn_event_release(argc, argv, &trace_uprobe_ops);
575
576         ret = trace_uprobe_create(argc, (const char **)argv);
577         return ret == -ECANCELED ? -EINVAL : ret;
578 }
579
580 static int trace_uprobe_release(struct dyn_event *ev)
581 {
582         struct trace_uprobe *tu = to_trace_uprobe(ev);
583
584         return unregister_trace_uprobe(tu);
585 }
586
587 /* Probes listing interfaces */
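/*
 * Each line produced below has the form (values illustrative):
 *   p:uprobes/my_ev /bin/bash:0x00000000004245c0 arg1=%ax
 * with the reference counter offset, if any, appended as "(0x...)" right
 * after the probe offset.
 */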
588 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
589 {
590         struct trace_uprobe *tu = to_trace_uprobe(ev);
591         char c = is_ret_probe(tu) ? 'r' : 'p';
592         int i;
593
594         seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
595                         trace_event_name(&tu->tp.call), tu->filename,
596                         (int)(sizeof(void *) * 2), tu->offset);
597
598         if (tu->ref_ctr_offset)
599                 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
600
601         for (i = 0; i < tu->tp.nr_args; i++)
602                 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
603
604         seq_putc(m, '\n');
605         return 0;
606 }
607
608 static int probes_seq_show(struct seq_file *m, void *v)
609 {
610         struct dyn_event *ev = v;
611
612         if (!is_trace_uprobe(ev))
613                 return 0;
614
615         return trace_uprobe_show(m, ev);
616 }
617
618 static const struct seq_operations probes_seq_op = {
619         .start  = dyn_event_seq_start,
620         .next   = dyn_event_seq_next,
621         .stop   = dyn_event_seq_stop,
622         .show   = probes_seq_show
623 };
624
625 static int probes_open(struct inode *inode, struct file *file)
626 {
627         int ret;
628
629         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
630                 ret = dyn_events_release_all(&trace_uprobe_ops);
631                 if (ret)
632                         return ret;
633         }
634
635         return seq_open(file, &probes_seq_op);
636 }
637
638 static ssize_t probes_write(struct file *file, const char __user *buffer,
639                             size_t count, loff_t *ppos)
640 {
641         return trace_parse_run_command(file, buffer, count, ppos,
642                                         create_or_delete_trace_uprobe);
643 }
644
645 static const struct file_operations uprobe_events_ops = {
646         .owner          = THIS_MODULE,
647         .open           = probes_open,
648         .read           = seq_read,
649         .llseek         = seq_lseek,
650         .release        = seq_release,
651         .write          = probes_write,
652 };
653
654 /* Probes profiling interfaces */
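/*
 * uprobe_profile reports one line per probe: the probed file, the event
 * name and the accumulated hit count, e.g. (illustrative):
 *   /bin/bash  my_ev  42
 */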
655 static int probes_profile_seq_show(struct seq_file *m, void *v)
656 {
657         struct dyn_event *ev = v;
658         struct trace_uprobe *tu;
659
660         if (!is_trace_uprobe(ev))
661                 return 0;
662
663         tu = to_trace_uprobe(ev);
664         seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
665                         trace_event_name(&tu->tp.call), tu->nhit);
666         return 0;
667 }
668
669 static const struct seq_operations profile_seq_op = {
670         .start  = dyn_event_seq_start,
671         .next   = dyn_event_seq_next,
672         .stop   = dyn_event_seq_stop,
673         .show   = probes_profile_seq_show
674 };
675
676 static int profile_open(struct inode *inode, struct file *file)
677 {
678         return seq_open(file, &profile_seq_op);
679 }
680
681 static const struct file_operations uprobe_profile_ops = {
682         .owner          = THIS_MODULE,
683         .open           = profile_open,
684         .read           = seq_read,
685         .llseek         = seq_lseek,
686         .release        = seq_release,
687 };
688
689 struct uprobe_cpu_buffer {
690         struct mutex mutex;
691         void *buf;
692 };
693 static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
694 static int uprobe_buffer_refcnt;
695
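/*
 * One page of scratch space per possible CPU, allocated when the first
 * probe is enabled and freed when the last one is disabled
 * (uprobe_buffer_refcnt is serialized by event_mutex). Probe handlers
 * assemble the fetched arguments here before copying them into the trace
 * ring buffer or the perf buffer.
 */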
696 static int uprobe_buffer_init(void)
697 {
698         int cpu, err_cpu;
699
700         uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
701         if (uprobe_cpu_buffer == NULL)
702                 return -ENOMEM;
703
704         for_each_possible_cpu(cpu) {
705                 struct page *p = alloc_pages_node(cpu_to_node(cpu),
706                                                   GFP_KERNEL, 0);
707                 if (p == NULL) {
708                         err_cpu = cpu;
709                         goto err;
710                 }
711                 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
712                 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
713         }
714
715         return 0;
716
717 err:
718         for_each_possible_cpu(cpu) {
719                 if (cpu == err_cpu)
720                         break;
721                 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
722         }
723
724         free_percpu(uprobe_cpu_buffer);
725         return -ENOMEM;
726 }
727
728 static int uprobe_buffer_enable(void)
729 {
730         int ret = 0;
731
732         BUG_ON(!mutex_is_locked(&event_mutex));
733
734         if (uprobe_buffer_refcnt++ == 0) {
735                 ret = uprobe_buffer_init();
736                 if (ret < 0)
737                         uprobe_buffer_refcnt--;
738         }
739
740         return ret;
741 }
742
743 static void uprobe_buffer_disable(void)
744 {
745         int cpu;
746
747         BUG_ON(!mutex_is_locked(&event_mutex));
748
749         if (--uprobe_buffer_refcnt == 0) {
750                 for_each_possible_cpu(cpu)
751                         free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
752                                                              cpu)->buf);
753
754                 free_percpu(uprobe_cpu_buffer);
755                 uprobe_cpu_buffer = NULL;
756         }
757 }
758
759 static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
760 {
761         struct uprobe_cpu_buffer *ucb;
762         int cpu;
763
764         cpu = raw_smp_processor_id();
765         ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
766
767         /*
768          * Use per-cpu buffers for fastest access, but we might migrate,
769          * so the mutex makes sure we have sole access to the buffer.
770          */
771         mutex_lock(&ucb->mutex);
772
773         return ucb;
774 }
775
776 static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
777 {
778         mutex_unlock(&ucb->mutex);
779 }
780
781 static void __uprobe_trace_func(struct trace_uprobe *tu,
782                                 unsigned long func, struct pt_regs *regs,
783                                 struct uprobe_cpu_buffer *ucb, int dsize,
784                                 struct trace_event_file *trace_file)
785 {
786         struct uprobe_trace_entry_head *entry;
787         struct ring_buffer_event *event;
788         struct ring_buffer *buffer;
789         void *data;
790         int size, esize;
791         struct trace_event_call *call = &tu->tp.call;
792
793         WARN_ON(call != trace_file->event_call);
794
795         if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
796                 return;
797
798         if (trace_trigger_soft_disabled(trace_file))
799                 return;
800
801         esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
802         size = esize + tu->tp.size + dsize;
803         event = trace_event_buffer_lock_reserve(&buffer, trace_file,
804                                                 call->event.type, size, 0, 0);
805         if (!event)
806                 return;
807
808         entry = ring_buffer_event_data(event);
809         if (is_ret_probe(tu)) {
810                 entry->vaddr[0] = func;
811                 entry->vaddr[1] = instruction_pointer(regs);
812                 data = DATAOF_TRACE_ENTRY(entry, true);
813         } else {
814                 entry->vaddr[0] = instruction_pointer(regs);
815                 data = DATAOF_TRACE_ENTRY(entry, false);
816         }
817
818         memcpy(data, ucb->buf, tu->tp.size + dsize);
819
820         event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
821 }
822
823 /* uprobe handler */
824 static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
825                              struct uprobe_cpu_buffer *ucb, int dsize)
826 {
827         struct event_file_link *link;
828
829         if (is_ret_probe(tu))
830                 return 0;
831
832         rcu_read_lock();
833         list_for_each_entry_rcu(link, &tu->tp.files, list)
834                 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
835         rcu_read_unlock();
836
837         return 0;
838 }
839
840 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
841                                  struct pt_regs *regs,
842                                  struct uprobe_cpu_buffer *ucb, int dsize)
843 {
844         struct event_file_link *link;
845
846         rcu_read_lock();
847         list_for_each_entry_rcu(link, &tu->tp.files, list)
848                 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
849         rcu_read_unlock();
850 }
851
852 /* Event entry printers */
853 static enum print_line_t
854 print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
855 {
856         struct uprobe_trace_entry_head *entry;
857         struct trace_seq *s = &iter->seq;
858         struct trace_uprobe *tu;
859         u8 *data;
860
861         entry = (struct uprobe_trace_entry_head *)iter->ent;
862         tu = container_of(event, struct trace_uprobe, tp.call.event);
863
864         if (is_ret_probe(tu)) {
865                 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
866                                  trace_event_name(&tu->tp.call),
867                                  entry->vaddr[1], entry->vaddr[0]);
868                 data = DATAOF_TRACE_ENTRY(entry, true);
869         } else {
870                 trace_seq_printf(s, "%s: (0x%lx)",
871                                  trace_event_name(&tu->tp.call),
872                                  entry->vaddr[0]);
873                 data = DATAOF_TRACE_ENTRY(entry, false);
874         }
875
876         if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
877                 goto out;
878
879         trace_seq_putc(s, '\n');
880
881  out:
882         return trace_handle_return(s);
883 }
884
885 typedef bool (*filter_func_t)(struct uprobe_consumer *self,
886                                 enum uprobe_filter_ctx ctx,
887                                 struct mm_struct *mm);
888
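/*
 * A trace_uprobe is used either by ftrace (TP_FLAG_TRACE, via a
 * trace_event_file) or by perf (TP_FLAG_PROFILE, file == NULL), but not by
 * both at once; probe_event_enable() rejects the second kind of user while
 * the first is active.
 */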
889 static int
890 probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
891                    filter_func_t filter)
892 {
893         bool enabled = trace_probe_is_enabled(&tu->tp);
894         struct event_file_link *link = NULL;
895         int ret;
896
897         if (file) {
898                 if (tu->tp.flags & TP_FLAG_PROFILE)
899                         return -EINTR;
900
901                 link = kmalloc(sizeof(*link), GFP_KERNEL);
902                 if (!link)
903                         return -ENOMEM;
904
905                 link->file = file;
906                 list_add_tail_rcu(&link->list, &tu->tp.files);
907
908                 tu->tp.flags |= TP_FLAG_TRACE;
909         } else {
910                 if (tu->tp.flags & TP_FLAG_TRACE)
911                         return -EINTR;
912
913                 tu->tp.flags |= TP_FLAG_PROFILE;
914         }
915
916         WARN_ON(!uprobe_filter_is_empty(&tu->filter));
917
918         if (enabled)
919                 return 0;
920
921         ret = uprobe_buffer_enable();
922         if (ret)
923                 goto err_flags;
924
925         tu->consumer.filter = filter;
926         tu->inode = d_real_inode(tu->path.dentry);
927         if (tu->ref_ctr_offset) {
928                 ret = uprobe_register_refctr(tu->inode, tu->offset,
929                                 tu->ref_ctr_offset, &tu->consumer);
930         } else {
931                 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
932         }
933
934         if (ret)
935                 goto err_buffer;
936
937         return 0;
938
939  err_buffer:
940         uprobe_buffer_disable();
941
942  err_flags:
943         if (file) {
944                 list_del(&link->list);
945                 kfree(link);
946                 tu->tp.flags &= ~TP_FLAG_TRACE;
947         } else {
948                 tu->tp.flags &= ~TP_FLAG_PROFILE;
949         }
950         return ret;
951 }
952
953 static void
954 probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
955 {
956         if (!trace_probe_is_enabled(&tu->tp))
957                 return;
958
959         if (file) {
960                 struct event_file_link *link;
961
962                 link = find_event_file_link(&tu->tp, file);
963                 if (!link)
964                         return;
965
966                 list_del_rcu(&link->list);
967                 /* synchronize with u{,ret}probe_trace_func */
968                 synchronize_rcu();
969                 kfree(link);
970
971                 if (!list_empty(&tu->tp.files))
972                         return;
973         }
974
975         WARN_ON(!uprobe_filter_is_empty(&tu->filter));
976
977         uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
978         tu->inode = NULL;
979         tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
980
981         uprobe_buffer_disable();
982 }
983
984 static int uprobe_event_define_fields(struct trace_event_call *event_call)
985 {
986         int ret, size;
987         struct uprobe_trace_entry_head field;
988         struct trace_uprobe *tu = event_call->data;
989
990         if (is_ret_probe(tu)) {
991                 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
992                 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
993                 size = SIZEOF_TRACE_ENTRY(true);
994         } else {
995                 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
996                 size = SIZEOF_TRACE_ENTRY(false);
997         }
998
999         return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1000 }
1001
1002 #ifdef CONFIG_PERF_EVENTS
1003 static bool
1004 __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1005 {
1006         struct perf_event *event;
1007
1008         if (filter->nr_systemwide)
1009                 return true;
1010
1011         list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1012                 if (event->hw.target->mm == mm)
1013                         return true;
1014         }
1015
1016         return false;
1017 }
1018
1019 static inline bool
1020 uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1021 {
1022         return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1023 }
1024
1025 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1026 {
1027         bool done;
1028
1029         write_lock(&tu->filter.rwlock);
1030         if (event->hw.target) {
1031                 list_del(&event->hw.tp_list);
1032                 done = tu->filter.nr_systemwide ||
1033                         (event->hw.target->flags & PF_EXITING) ||
1034                         uprobe_filter_event(tu, event);
1035         } else {
1036                 tu->filter.nr_systemwide--;
1037                 done = tu->filter.nr_systemwide;
1038         }
1039         write_unlock(&tu->filter.rwlock);
1040
1041         if (!done)
1042                 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1043
1044         return 0;
1045 }
1046
1047 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1048 {
1049         bool done;
1050         int err;
1051
1052         write_lock(&tu->filter.rwlock);
1053         if (event->hw.target) {
1054                 /*
1055                  * event->parent != NULL means copy_process(), we can avoid
1056                  * uprobe_apply(). current->mm must be probed and we can rely
1057                  * on dup_mmap() which preserves the already installed bp's.
1058                  *
1059                  * attr.enable_on_exec means that exec/mmap will install the
1060                  * breakpoints we need.
1061                  */
1062                 done = tu->filter.nr_systemwide ||
1063                         event->parent || event->attr.enable_on_exec ||
1064                         uprobe_filter_event(tu, event);
1065                 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1066         } else {
1067                 done = tu->filter.nr_systemwide;
1068                 tu->filter.nr_systemwide++;
1069         }
1070         write_unlock(&tu->filter.rwlock);
1071
1072         err = 0;
1073         if (!done) {
1074                 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1075                 if (err)
1076                         uprobe_perf_close(tu, event);
1077         }
1078         return err;
1079 }
1080
1081 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1082                                 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1083 {
1084         struct trace_uprobe *tu;
1085         int ret;
1086
1087         tu = container_of(uc, struct trace_uprobe, consumer);
1088         read_lock(&tu->filter.rwlock);
1089         ret = __uprobe_perf_filter(&tu->filter, mm);
1090         read_unlock(&tu->filter.rwlock);
1091
1092         return ret;
1093 }
1094
1095 static void __uprobe_perf_func(struct trace_uprobe *tu,
1096                                unsigned long func, struct pt_regs *regs,
1097                                struct uprobe_cpu_buffer *ucb, int dsize)
1098 {
1099         struct trace_event_call *call = &tu->tp.call;
1100         struct uprobe_trace_entry_head *entry;
1101         struct hlist_head *head;
1102         void *data;
1103         int size, esize;
1104         int rctx;
1105
1106         if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1107                 return;
1108
1109         esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1110
1111         size = esize + tu->tp.size + dsize;
1112         size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1113         if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1114                 return;
1115
1116         preempt_disable();
1117         head = this_cpu_ptr(call->perf_events);
1118         if (hlist_empty(head))
1119                 goto out;
1120
1121         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1122         if (!entry)
1123                 goto out;
1124
1125         if (is_ret_probe(tu)) {
1126                 entry->vaddr[0] = func;
1127                 entry->vaddr[1] = instruction_pointer(regs);
1128                 data = DATAOF_TRACE_ENTRY(entry, true);
1129         } else {
1130                 entry->vaddr[0] = instruction_pointer(regs);
1131                 data = DATAOF_TRACE_ENTRY(entry, false);
1132         }
1133
1134         memcpy(data, ucb->buf, tu->tp.size + dsize);
1135
1136         if (size - esize > tu->tp.size + dsize) {
1137                 int len = tu->tp.size + dsize;
1138
1139                 memset(data + len, 0, size - esize - len);
1140         }
1141
1142         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1143                               head, NULL);
1144  out:
1145         preempt_enable();
1146 }
1147
1148 /* uprobe profile handler */
1149 static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1150                             struct uprobe_cpu_buffer *ucb, int dsize)
1151 {
1152         if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1153                 return UPROBE_HANDLER_REMOVE;
1154
1155         if (!is_ret_probe(tu))
1156                 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1157         return 0;
1158 }
1159
1160 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1161                                 struct pt_regs *regs,
1162                                 struct uprobe_cpu_buffer *ucb, int dsize)
1163 {
1164         __uprobe_perf_func(tu, func, regs, ucb, dsize);
1165 }
1166
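/*
 * Report an attached uprobe's type, probed file and offset for a perf
 * event. (Callers live outside this file; judging by the name, this backs
 * the bpf task-fd-query interface.)
 */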
1167 int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1168                         const char **filename, u64 *probe_offset,
1169                         bool perf_type_tracepoint)
1170 {
1171         const char *pevent = trace_event_name(event->tp_event);
1172         const char *group = event->tp_event->class->system;
1173         struct trace_uprobe *tu;
1174
1175         if (perf_type_tracepoint)
1176                 tu = find_probe_event(pevent, group);
1177         else
1178                 tu = event->tp_event->data;
1179         if (!tu)
1180                 return -EINVAL;
1181
1182         *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1183                                     : BPF_FD_TYPE_UPROBE;
1184         *filename = tu->filename;
1185         *probe_offset = tu->offset;
1186         return 0;
1187 }
1188 #endif  /* CONFIG_PERF_EVENTS */
1189
1190 static int
1191 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1192                       void *data)
1193 {
1194         struct trace_uprobe *tu = event->data;
1195         struct trace_event_file *file = data;
1196
1197         switch (type) {
1198         case TRACE_REG_REGISTER:
1199                 return probe_event_enable(tu, file, NULL);
1200
1201         case TRACE_REG_UNREGISTER:
1202                 probe_event_disable(tu, file);
1203                 return 0;
1204
1205 #ifdef CONFIG_PERF_EVENTS
1206         case TRACE_REG_PERF_REGISTER:
1207                 return probe_event_enable(tu, NULL, uprobe_perf_filter);
1208
1209         case TRACE_REG_PERF_UNREGISTER:
1210                 probe_event_disable(tu, NULL);
1211                 return 0;
1212
1213         case TRACE_REG_PERF_OPEN:
1214                 return uprobe_perf_open(tu, data);
1215
1216         case TRACE_REG_PERF_CLOSE:
1217                 return uprobe_perf_close(tu, data);
1218
1219 #endif
1220         default:
1221                 return 0;
1222         }
1223         return 0;
1224 }
1225
1226 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1227 {
1228         struct trace_uprobe *tu;
1229         struct uprobe_dispatch_data udd;
1230         struct uprobe_cpu_buffer *ucb;
1231         int dsize, esize;
1232         int ret = 0;
1233
1234
1235         tu = container_of(con, struct trace_uprobe, consumer);
1236         tu->nhit++;
1237
1238         udd.tu = tu;
1239         udd.bp_addr = instruction_pointer(regs);
1240
1241         current->utask->vaddr = (unsigned long) &udd;
1242
1243         if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1244                 return 0;
1245
1246         dsize = __get_data_size(&tu->tp, regs);
1247         esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1248
1249         ucb = uprobe_buffer_get();
1250         store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1251
1252         if (tu->tp.flags & TP_FLAG_TRACE)
1253                 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1254
1255 #ifdef CONFIG_PERF_EVENTS
1256         if (tu->tp.flags & TP_FLAG_PROFILE)
1257                 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1258 #endif
1259         uprobe_buffer_put(ucb);
1260         return ret;
1261 }
1262
1263 static int uretprobe_dispatcher(struct uprobe_consumer *con,
1264                                 unsigned long func, struct pt_regs *regs)
1265 {
1266         struct trace_uprobe *tu;
1267         struct uprobe_dispatch_data udd;
1268         struct uprobe_cpu_buffer *ucb;
1269         int dsize, esize;
1270
1271         tu = container_of(con, struct trace_uprobe, consumer);
1272
1273         udd.tu = tu;
1274         udd.bp_addr = func;
1275
1276         current->utask->vaddr = (unsigned long) &udd;
1277
1278         if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1279                 return 0;
1280
1281         dsize = __get_data_size(&tu->tp, regs);
1282         esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1283
1284         ucb = uprobe_buffer_get();
1285         store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1286
1287         if (tu->tp.flags & TP_FLAG_TRACE)
1288                 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1289
1290 #ifdef CONFIG_PERF_EVENTS
1291         if (tu->tp.flags & TP_FLAG_PROFILE)
1292                 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1293 #endif
1294         uprobe_buffer_put(ucb);
1295         return 0;
1296 }
1297
1298 static struct trace_event_functions uprobe_funcs = {
1299         .trace          = print_uprobe_event
1300 };
1301
1302 static inline void init_trace_event_call(struct trace_uprobe *tu,
1303                                          struct trace_event_call *call)
1304 {
1305         INIT_LIST_HEAD(&call->class->fields);
1306         call->event.funcs = &uprobe_funcs;
1307         call->class->define_fields = uprobe_event_define_fields;
1308
1309         call->flags = TRACE_EVENT_FL_UPROBE;
1310         call->class->reg = trace_uprobe_register;
1311         call->data = tu;
1312 }
1313
1314 static int register_uprobe_event(struct trace_uprobe *tu)
1315 {
1316         struct trace_event_call *call = &tu->tp.call;
1317         int ret = 0;
1318
1319         init_trace_event_call(tu, call);
1320
1321         if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1322                 return -ENOMEM;
1323
1324         ret = register_trace_event(&call->event);
1325         if (!ret) {
1326                 kfree(call->print_fmt);
1327                 return -ENODEV;
1328         }
1329
1330         ret = trace_add_event_call(call);
1331
1332         if (ret) {
1333                 pr_info("Failed to register uprobe event: %s\n",
1334                         trace_event_name(call));
1335                 kfree(call->print_fmt);
1336                 unregister_trace_event(&call->event);
1337         }
1338
1339         return ret;
1340 }
1341
1342 static int unregister_uprobe_event(struct trace_uprobe *tu)
1343 {
1344         int ret;
1345
1346         /* tu->event is unregistered in trace_remove_event_call() */
1347         ret = trace_remove_event_call(&tu->tp.call);
1348         if (ret)
1349                 return ret;
1350         kfree(tu->tp.call.print_fmt);
1351         tu->tp.call.print_fmt = NULL;
1352         return 0;
1353 }
1354
1355 #ifdef CONFIG_PERF_EVENTS
1356 struct trace_event_call *
1357 create_local_trace_uprobe(char *name, unsigned long offs,
1358                           unsigned long ref_ctr_offset, bool is_return)
1359 {
1360         struct trace_uprobe *tu;
1361         struct path path;
1362         int ret;
1363
1364         ret = kern_path(name, LOOKUP_FOLLOW, &path);
1365         if (ret)
1366                 return ERR_PTR(ret);
1367
1368         if (!d_is_reg(path.dentry)) {
1369                 path_put(&path);
1370                 return ERR_PTR(-EINVAL);
1371         }
1372
1373         /*
1374          * local trace_uprobes are not added to dyn_event, so they are never
1375          * searched in find_probe_event(). Therefore, there is no concern of
1376          * duplicated name "DUMMY_EVENT" here.
1377          */
1378         tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1379                                 is_return);
1380
1381         if (IS_ERR(tu)) {
1382                 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1383                         (int)PTR_ERR(tu));
1384                 path_put(&path);
1385                 return ERR_CAST(tu);
1386         }
1387
1388         tu->offset = offs;
1389         tu->path = path;
1390         tu->ref_ctr_offset = ref_ctr_offset;
1391         tu->filename = kstrdup(name, GFP_KERNEL);
1392         init_trace_event_call(tu, &tu->tp.call);
1393
1394         if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1395                 ret = -ENOMEM;
1396                 goto error;
1397         }
1398
1399         return &tu->tp.call;
1400 error:
1401         free_trace_uprobe(tu);
1402         return ERR_PTR(ret);
1403 }
1404
1405 void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1406 {
1407         struct trace_uprobe *tu;
1408
1409         tu = container_of(event_call, struct trace_uprobe, tp.call);
1410
1411         kfree(tu->tp.call.print_fmt);
1412         tu->tp.call.print_fmt = NULL;
1413
1414         free_trace_uprobe(tu);
1415 }
1416 #endif /* CONFIG_PERF_EVENTS */
1417
1418 /* Make a trace interface for controlling probe points */
1419 static __init int init_uprobe_trace(void)
1420 {
1421         struct dentry *d_tracer;
1422         int ret;
1423
1424         ret = dyn_event_register(&trace_uprobe_ops);
1425         if (ret)
1426                 return ret;
1427
1428         d_tracer = tracing_init_dentry();
1429         if (IS_ERR(d_tracer))
1430                 return 0;
1431
1432         trace_create_file("uprobe_events", 0644, d_tracer,
1433                                     NULL, &uprobe_events_ops);
1434         /* Profile interface */
1435         trace_create_file("uprobe_profile", 0444, d_tracer,
1436                                     NULL, &uprobe_profile_ops);
1437         return 0;
1438 }
1439
1440 fs_initcall(init_uprobe_trace);