kernel/trace/trace_kprobe.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kprobes-based tracing events
4  *
5  * Created by Masami Hiramatsu <mhiramat@redhat.com>
6  *
7  */
8 #define pr_fmt(fmt)     "trace_kprobe: " fmt
9
10 #include <linux/bpf-cgroup.h>
11 #include <linux/security.h>
12 #include <linux/module.h>
13 #include <linux/uaccess.h>
14 #include <linux/rculist.h>
15 #include <linux/error-injection.h>
16
17 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
18
19 #include "trace_dynevent.h"
20 #include "trace_kprobe_selftest.h"
21 #include "trace_probe.h"
22 #include "trace_probe_tmpl.h"
23 #include "trace_probe_kernel.h"
24
25 #define KPROBE_EVENT_SYSTEM "kprobes"
26 #define KRETPROBE_MAXACTIVE_MAX 4096
27
28 /* Kprobe early definition from command line */
29 static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
30
31 static int __init set_kprobe_boot_events(char *str)
32 {
33         strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
34         disable_tracing_selftest("running kprobe events");
35
36         return 1;
37 }
38 __setup("kprobe_event=", set_kprobe_boot_events);
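
/*
 * The probe list given via "kprobe_event=" uses the same syntax as the
 * kprobe_events interface, with probes separated by semicolons and the
 * fields of each probe separated by commas instead of spaces.  As an
 * illustration (the symbol and $argN fetch args are examples only), a
 * probe on vfs_read with its first two arguments could be requested at
 * boot with:
 *
 *   kprobe_event=p,vfs_read,$arg1,$arg2
 */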
39
40 static int trace_kprobe_create(const char *raw_command);
41 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
42 static int trace_kprobe_release(struct dyn_event *ev);
43 static bool trace_kprobe_is_busy(struct dyn_event *ev);
44 static bool trace_kprobe_match(const char *system, const char *event,
45                         int argc, const char **argv, struct dyn_event *ev);
46
47 static struct dyn_event_operations trace_kprobe_ops = {
48         .create = trace_kprobe_create,
49         .show = trace_kprobe_show,
50         .is_busy = trace_kprobe_is_busy,
51         .free = trace_kprobe_release,
52         .match = trace_kprobe_match,
53 };
54
55 /*
56  * Kprobe event core functions
57  */
58 struct trace_kprobe {
59         struct dyn_event        devent;
60         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
61         unsigned long __percpu *nhit;
62         const char              *symbol;        /* symbol name */
63         struct trace_probe      tp;
64 };
65
66 static bool is_trace_kprobe(struct dyn_event *ev)
67 {
68         return ev->ops == &trace_kprobe_ops;
69 }
70
71 static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
72 {
73         return container_of(ev, struct trace_kprobe, devent);
74 }
75
76 /**
77  * for_each_trace_kprobe - iterate over the trace_kprobe list
78  * @pos:        the struct trace_kprobe * for each entry
79  * @dpos:       the struct dyn_event * to use as a loop cursor
80  */
81 #define for_each_trace_kprobe(pos, dpos)        \
82         for_each_dyn_event(dpos)                \
83                 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
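
/*
 * Typical use, as in find_trace_kprobe() below (a sketch only;
 * looking_for() is a hypothetical predicate):
 *
 *	struct dyn_event *pos;
 *	struct trace_kprobe *tk;
 *
 *	for_each_trace_kprobe(tk, pos)
 *		if (looking_for(tk))
 *			return tk;
 */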
84
85 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
86 {
87         return tk->rp.handler != NULL;
88 }
89
90 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
91 {
92         return tk->symbol ? tk->symbol : "unknown";
93 }
94
95 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
96 {
97         return tk->rp.kp.offset;
98 }
99
100 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
101 {
102         return kprobe_gone(&tk->rp.kp);
103 }
104
105 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
106                                                  struct module *mod)
107 {
108         int len = strlen(module_name(mod));
109         const char *name = trace_kprobe_symbol(tk);
110
111         return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
112 }
113
114 static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
115 {
116         char *p;
117         bool ret;
118
119         if (!tk->symbol)
120                 return false;
121         p = strchr(tk->symbol, ':');
122         if (!p)
123                 return true;
124         *p = '\0';
125         rcu_read_lock_sched();
126         ret = !!find_module(tk->symbol);
127         rcu_read_unlock_sched();
128         *p = ':';
129
130         return ret;
131 }
132
133 static bool trace_kprobe_is_busy(struct dyn_event *ev)
134 {
135         struct trace_kprobe *tk = to_trace_kprobe(ev);
136
137         return trace_probe_is_enabled(&tk->tp);
138 }
139
140 static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
141                                             int argc, const char **argv)
142 {
143         char buf[MAX_ARGSTR_LEN + 1];
144
145         if (!argc)
146                 return true;
147
148         if (!tk->symbol)
149                 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
150         else if (tk->rp.kp.offset)
151                 snprintf(buf, sizeof(buf), "%s+%u",
152                          trace_kprobe_symbol(tk), tk->rp.kp.offset);
153         else
154                 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
155         if (strcmp(buf, argv[0]))
156                 return false;
157         argc--; argv++;
158
159         return trace_probe_match_command_args(&tk->tp, argc, argv);
160 }
161
162 static bool trace_kprobe_match(const char *system, const char *event,
163                         int argc, const char **argv, struct dyn_event *ev)
164 {
165         struct trace_kprobe *tk = to_trace_kprobe(ev);
166
167         return (event[0] == '\0' ||
168                 strcmp(trace_probe_name(&tk->tp), event) == 0) &&
169             (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
170             trace_kprobe_match_command_head(tk, argc, argv);
171 }
172
173 static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
174 {
175         unsigned long nhit = 0;
176         int cpu;
177
178         for_each_possible_cpu(cpu)
179                 nhit += *per_cpu_ptr(tk->nhit, cpu);
180
181         return nhit;
182 }
183
184 static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
185 {
186         return !(list_empty(&tk->rp.kp.list) &&
187                  hlist_unhashed(&tk->rp.kp.hlist));
188 }
189
190 /* Return 0 if it fails to find the symbol address */
191 static nokprobe_inline
192 unsigned long trace_kprobe_address(struct trace_kprobe *tk)
193 {
194         unsigned long addr;
195
196         if (tk->symbol) {
197                 addr = (unsigned long)
198                         kallsyms_lookup_name(trace_kprobe_symbol(tk));
199                 if (addr)
200                         addr += tk->rp.kp.offset;
201         } else {
202                 addr = (unsigned long)tk->rp.kp.addr;
203         }
204         return addr;
205 }
206
207 static nokprobe_inline struct trace_kprobe *
208 trace_kprobe_primary_from_call(struct trace_event_call *call)
209 {
210         struct trace_probe *tp;
211
212         tp = trace_probe_primary_from_call(call);
213         if (WARN_ON_ONCE(!tp))
214                 return NULL;
215
216         return container_of(tp, struct trace_kprobe, tp);
217 }
218
219 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
220 {
221         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
222
223         return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
224                         tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
225                         tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
226 }
227
228 bool trace_kprobe_error_injectable(struct trace_event_call *call)
229 {
230         struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
231
232         return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
233                false;
234 }
235
236 static int register_kprobe_event(struct trace_kprobe *tk);
237 static int unregister_kprobe_event(struct trace_kprobe *tk);
238
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
240 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
241                                 struct pt_regs *regs);
242
243 static void free_trace_kprobe(struct trace_kprobe *tk)
244 {
245         if (tk) {
246                 trace_probe_cleanup(&tk->tp);
247                 kfree(tk->symbol);
248                 free_percpu(tk->nhit);
249                 kfree(tk);
250         }
251 }
252
253 /*
254  * Allocate new trace_probe and initialize it (including kprobes).
255  */
256 static struct trace_kprobe *alloc_trace_kprobe(const char *group,
257                                              const char *event,
258                                              void *addr,
259                                              const char *symbol,
260                                              unsigned long offs,
261                                              int maxactive,
262                                              int nargs, bool is_return)
263 {
264         struct trace_kprobe *tk;
265         int ret = -ENOMEM;
266
267         tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
268         if (!tk)
269                 return ERR_PTR(ret);
270
271         tk->nhit = alloc_percpu(unsigned long);
272         if (!tk->nhit)
273                 goto error;
274
275         if (symbol) {
276                 tk->symbol = kstrdup(symbol, GFP_KERNEL);
277                 if (!tk->symbol)
278                         goto error;
279                 tk->rp.kp.symbol_name = tk->symbol;
280                 tk->rp.kp.offset = offs;
281         } else
282                 tk->rp.kp.addr = addr;
283
284         if (is_return)
285                 tk->rp.handler = kretprobe_dispatcher;
286         else
287                 tk->rp.kp.pre_handler = kprobe_dispatcher;
288
289         tk->rp.maxactive = maxactive;
290         INIT_HLIST_NODE(&tk->rp.kp.hlist);
291         INIT_LIST_HEAD(&tk->rp.kp.list);
292
293         ret = trace_probe_init(&tk->tp, event, group, false);
294         if (ret < 0)
295                 goto error;
296
297         dyn_event_init(&tk->devent, &trace_kprobe_ops);
298         return tk;
299 error:
300         free_trace_kprobe(tk);
301         return ERR_PTR(ret);
302 }
303
304 static struct trace_kprobe *find_trace_kprobe(const char *event,
305                                               const char *group)
306 {
307         struct dyn_event *pos;
308         struct trace_kprobe *tk;
309
310         for_each_trace_kprobe(tk, pos)
311                 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
312                     strcmp(trace_probe_group_name(&tk->tp), group) == 0)
313                         return tk;
314         return NULL;
315 }
316
317 static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
318 {
319         int ret = 0;
320
321         if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
322                 if (trace_kprobe_is_return(tk))
323                         ret = enable_kretprobe(&tk->rp);
324                 else
325                         ret = enable_kprobe(&tk->rp.kp);
326         }
327
328         return ret;
329 }
330
331 static void __disable_trace_kprobe(struct trace_probe *tp)
332 {
333         struct trace_kprobe *tk;
334
335         list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
336                 if (!trace_kprobe_is_registered(tk))
337                         continue;
338                 if (trace_kprobe_is_return(tk))
339                         disable_kretprobe(&tk->rp);
340                 else
341                         disable_kprobe(&tk->rp.kp);
342         }
343 }
344
345 /*
346  * Enable trace_probe.
347  * If the file is NULL, enable the "perf" handler, else the "trace" handler.
348  */
349 static int enable_trace_kprobe(struct trace_event_call *call,
350                                 struct trace_event_file *file)
351 {
352         struct trace_probe *tp;
353         struct trace_kprobe *tk;
354         bool enabled;
355         int ret = 0;
356
357         tp = trace_probe_primary_from_call(call);
358         if (WARN_ON_ONCE(!tp))
359                 return -ENODEV;
360         enabled = trace_probe_is_enabled(tp);
361
362         /* This also changes "enabled" state */
363         if (file) {
364                 ret = trace_probe_add_file(tp, file);
365                 if (ret)
366                         return ret;
367         } else
368                 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
369
370         if (enabled)
371                 return 0;
372
373         list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
374                 if (trace_kprobe_has_gone(tk))
375                         continue;
376                 ret = __enable_trace_kprobe(tk);
377                 if (ret)
378                         break;
379                 enabled = true;
380         }
381
382         if (ret) {
383                 /* Failed to enable one of them. Roll back all */
384                 if (enabled)
385                         __disable_trace_kprobe(tp);
386                 if (file)
387                         trace_probe_remove_file(tp, file);
388                 else
389                         trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
390         }
391
392         return ret;
393 }
394
395 /*
396  * Disable trace_probe.
397  * If the file is NULL, disable the "perf" handler, else the "trace" handler.
398  */
399 static int disable_trace_kprobe(struct trace_event_call *call,
400                                 struct trace_event_file *file)
401 {
402         struct trace_probe *tp;
403
404         tp = trace_probe_primary_from_call(call);
405         if (WARN_ON_ONCE(!tp))
406                 return -ENODEV;
407
408         if (file) {
409                 if (!trace_probe_get_file_link(tp, file))
410                         return -ENOENT;
411                 if (!trace_probe_has_single_file(tp))
412                         goto out;
413                 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
414         } else
415                 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
416
417         if (!trace_probe_is_enabled(tp))
418                 __disable_trace_kprobe(tp);
419
420  out:
421         if (file)
422                 /*
423                  * Synchronization is done in the function below. For a perf
424                  * event, file == NULL and perf_trace_event_unreg() calls
425                  * tracepoint_synchronize_unregister() to synchronize the
426                  * event, so we don't need to care about it here.
427                  */
428                 trace_probe_remove_file(tp, file);
429
430         return 0;
431 }
432
433 #if defined(CONFIG_DYNAMIC_FTRACE) && \
434         !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
435 static bool __within_notrace_func(unsigned long addr)
436 {
437         unsigned long offset, size;
438
439         if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
440                 return false;
441
442         /* Get the entry address of the target function */
443         addr -= offset;
444
445         /*
446          * Since ftrace_location_range() does inclusive range check, we need
447          * to subtract 1 byte from the end address.
448          */
449         return !ftrace_location_range(addr, addr + size - 1);
450 }
451
452 static bool within_notrace_func(struct trace_kprobe *tk)
453 {
454         unsigned long addr = trace_kprobe_address(tk);
455         char symname[KSYM_NAME_LEN], *p;
456
457         if (!__within_notrace_func(addr))
458                 return false;
459
460         /* Check if the address is on a suffixed-symbol */
461         if (!lookup_symbol_name(addr, symname)) {
462                 p = strchr(symname, '.');
463                 if (!p)
464                         return true;
465                 *p = '\0';
466                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
467                 if (addr)
468                         return __within_notrace_func(addr);
469         }
470
471         return true;
472 }
473 #else
474 #define within_notrace_func(tk) (false)
475 #endif
476
477 /* Internal register function - just handle k*probes and flags */
478 static int __register_trace_kprobe(struct trace_kprobe *tk)
479 {
480         int i, ret;
481
482         ret = security_locked_down(LOCKDOWN_KPROBES);
483         if (ret)
484                 return ret;
485
486         if (trace_kprobe_is_registered(tk))
487                 return -EINVAL;
488
489         if (within_notrace_func(tk)) {
490                 pr_warn("Could not probe notrace function %s\n",
491                         trace_kprobe_symbol(tk));
492                 return -EINVAL;
493         }
494
495         for (i = 0; i < tk->tp.nr_args; i++) {
496                 ret = traceprobe_update_arg(&tk->tp.args[i]);
497                 if (ret)
498                         return ret;
499         }
500
501         /* Set/clear disabled flag according to tp->flag */
502         if (trace_probe_is_enabled(&tk->tp))
503                 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
504         else
505                 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
506
507         if (trace_kprobe_is_return(tk))
508                 ret = register_kretprobe(&tk->rp);
509         else
510                 ret = register_kprobe(&tk->rp.kp);
511
512         return ret;
513 }
514
515 /* Internal unregister function - just handle k*probes and flags */
516 static void __unregister_trace_kprobe(struct trace_kprobe *tk)
517 {
518         if (trace_kprobe_is_registered(tk)) {
519                 if (trace_kprobe_is_return(tk))
520                         unregister_kretprobe(&tk->rp);
521                 else
522                         unregister_kprobe(&tk->rp.kp);
523                 /* Cleanup kprobe for reuse and mark it unregistered */
524                 INIT_HLIST_NODE(&tk->rp.kp.hlist);
525                 INIT_LIST_HEAD(&tk->rp.kp.list);
526                 if (tk->rp.kp.symbol_name)
527                         tk->rp.kp.addr = NULL;
528         }
529 }
530
531 /* Unregister a trace_probe and probe_event */
532 static int unregister_trace_kprobe(struct trace_kprobe *tk)
533 {
534         /* If other probes are on the event, just unregister kprobe */
535         if (trace_probe_has_sibling(&tk->tp))
536                 goto unreg;
537
538         /* Enabled event can not be unregistered */
539         if (trace_probe_is_enabled(&tk->tp))
540                 return -EBUSY;
541
542         /* If there's a reference to the dynamic event */
543         if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
544                 return -EBUSY;
545
546         /* Will fail if probe is being used by ftrace or perf */
547         if (unregister_kprobe_event(tk))
548                 return -EBUSY;
549
550 unreg:
551         __unregister_trace_kprobe(tk);
552         dyn_event_remove(&tk->devent);
553         trace_probe_unlink(&tk->tp);
554
555         return 0;
556 }
557
558 static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
559                                          struct trace_kprobe *comp)
560 {
561         struct trace_probe_event *tpe = orig->tp.event;
562         int i;
563
564         list_for_each_entry(orig, &tpe->probes, tp.list) {
565                 if (strcmp(trace_kprobe_symbol(orig),
566                            trace_kprobe_symbol(comp)) ||
567                     trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
568                         continue;
569
570                 /*
571                  * trace_probe_compare_arg_type() ensured that nr_args and
572                  * each argument name and type are same. Let's compare comm.
573                  */
574                 for (i = 0; i < orig->tp.nr_args; i++) {
575                         if (strcmp(orig->tp.args[i].comm,
576                                    comp->tp.args[i].comm))
577                                 break;
578                 }
579
580                 if (i == orig->tp.nr_args)
581                         return true;
582         }
583
584         return false;
585 }
586
587 static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
588 {
589         int ret;
590
591         ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
592         if (ret) {
593                 /* Note that argument starts index = 2 */
594                 trace_probe_log_set_index(ret + 1);
595                 trace_probe_log_err(0, DIFF_ARG_TYPE);
596                 return -EEXIST;
597         }
598         if (trace_kprobe_has_same_kprobe(to, tk)) {
599                 trace_probe_log_set_index(0);
600                 trace_probe_log_err(0, SAME_PROBE);
601                 return -EEXIST;
602         }
603
604         /* Append to existing event */
605         ret = trace_probe_append(&tk->tp, &to->tp);
606         if (ret)
607                 return ret;
608
609         /* Register k*probe */
610         ret = __register_trace_kprobe(tk);
611         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
612                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
613                 ret = 0;
614         }
615
616         if (ret)
617                 trace_probe_unlink(&tk->tp);
618         else
619                 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
620
621         return ret;
622 }
623
624 /* Register a trace_probe and probe_event */
625 static int register_trace_kprobe(struct trace_kprobe *tk)
626 {
627         struct trace_kprobe *old_tk;
628         int ret;
629
630         mutex_lock(&event_mutex);
631
632         old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
633                                    trace_probe_group_name(&tk->tp));
634         if (old_tk) {
635                 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
636                         trace_probe_log_set_index(0);
637                         trace_probe_log_err(0, DIFF_PROBE_TYPE);
638                         ret = -EEXIST;
639                 } else {
640                         ret = append_trace_kprobe(tk, old_tk);
641                 }
642                 goto end;
643         }
644
645         /* Register new event */
646         ret = register_kprobe_event(tk);
647         if (ret) {
648                 if (ret == -EEXIST) {
649                         trace_probe_log_set_index(0);
650                         trace_probe_log_err(0, EVENT_EXIST);
651                 } else
652                         pr_warn("Failed to register probe event(%d)\n", ret);
653                 goto end;
654         }
655
656         /* Register k*probe */
657         ret = __register_trace_kprobe(tk);
658         if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
659                 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
660                 ret = 0;
661         }
662
663         if (ret < 0)
664                 unregister_kprobe_event(tk);
665         else
666                 dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
667
668 end:
669         mutex_unlock(&event_mutex);
670         return ret;
671 }
672
673 /* Module notifier call back, checking event on the module */
674 static int trace_kprobe_module_callback(struct notifier_block *nb,
675                                        unsigned long val, void *data)
676 {
677         struct module *mod = data;
678         struct dyn_event *pos;
679         struct trace_kprobe *tk;
680         int ret;
681
682         if (val != MODULE_STATE_COMING)
683                 return NOTIFY_DONE;
684
685         /* Update probes on coming module */
686         mutex_lock(&event_mutex);
687         for_each_trace_kprobe(tk, pos) {
688                 if (trace_kprobe_within_module(tk, mod)) {
689                         /* Don't need to check busy - this should have gone. */
690                         __unregister_trace_kprobe(tk);
691                         ret = __register_trace_kprobe(tk);
692                         if (ret)
693                                 pr_warn("Failed to re-register probe %s on %s: %d\n",
694                                         trace_probe_name(&tk->tp),
695                                         module_name(mod), ret);
696                 }
697         }
698         mutex_unlock(&event_mutex);
699
700         return NOTIFY_DONE;
701 }
702
703 static struct notifier_block trace_kprobe_module_nb = {
704         .notifier_call = trace_kprobe_module_callback,
705         .priority = 1   /* Invoked after kprobe module callback */
706 };
707
708 static int __trace_kprobe_create(int argc, const char *argv[])
709 {
710         /*
711          * Argument syntax:
712          *  - Add kprobe:
713          *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
714          *  - Add kretprobe:
715          *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
716          *    Or
717          *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
718          *
719          * Fetch args:
720          *  $retval     : fetch return value
721          *  $stack      : fetch stack address
722          *  $stackN     : fetch Nth of stack (N:0-)
723          *  $comm       : fetch current task comm
724          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
725          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
726          *  %REG        : fetch register REG
727          * Dereferencing memory fetch:
728          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
729          * Alias name of args:
730          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
731          * Type of args:
732          *  FETCHARG:TYPE : use TYPE instead of unsigned long.
733          */
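	/*
	 * For instance (illustrative only; the symbol, registers and
	 * fetch args are examples, and register names are architecture
	 * specific):
	 *
	 *   p:myprobe do_sys_open dfd=%ax mode=%cx filename=+0(%si)
	 *   r:myretprobe do_sys_open $retval
	 */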
734         struct trace_kprobe *tk = NULL;
735         int i, len, ret = 0;
736         bool is_return = false;
737         char *symbol = NULL, *tmp = NULL;
738         const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
739         enum probe_print_type ptype;
740         int maxactive = 0;
741         long offset = 0;
742         void *addr = NULL;
743         char buf[MAX_EVENT_NAME_LEN];
744         char gbuf[MAX_EVENT_NAME_LEN];
745         unsigned int flags = TPARG_FL_KERNEL;
746
747         switch (argv[0][0]) {
748         case 'r':
749                 is_return = true;
750                 break;
751         case 'p':
752                 break;
753         default:
754                 return -ECANCELED;
755         }
756         if (argc < 2)
757                 return -ECANCELED;
758
759         trace_probe_log_init("trace_kprobe", argc, argv);
760
761         event = strchr(&argv[0][1], ':');
762         if (event)
763                 event++;
764
765         if (isdigit(argv[0][1])) {
766                 if (!is_return) {
767                         trace_probe_log_err(1, MAXACT_NO_KPROBE);
768                         goto parse_error;
769                 }
770                 if (event)
771                         len = event - &argv[0][1] - 1;
772                 else
773                         len = strlen(&argv[0][1]);
774                 if (len > MAX_EVENT_NAME_LEN - 1) {
775                         trace_probe_log_err(1, BAD_MAXACT);
776                         goto parse_error;
777                 }
778                 memcpy(buf, &argv[0][1], len);
779                 buf[len] = '\0';
780                 ret = kstrtouint(buf, 0, &maxactive);
781                 if (ret || !maxactive) {
782                         trace_probe_log_err(1, BAD_MAXACT);
783                         goto parse_error;
784                 }
785                 /* kretprobe instances are iterated over via a list. The
786                  * maximum should stay reasonable.
787                  */
788                 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
789                         trace_probe_log_err(1, MAXACT_TOO_BIG);
790                         goto parse_error;
791                 }
792         }
793
794         /* Try to parse an address. If that fails, try to read the
795          * input as a symbol. */
796         if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
797                 trace_probe_log_set_index(1);
798                 /* Check whether uprobe event specified */
799                 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
800                         ret = -ECANCELED;
801                         goto error;
802                 }
803                 /* a symbol specified */
804                 symbol = kstrdup(argv[1], GFP_KERNEL);
805                 if (!symbol)
806                         return -ENOMEM;
807
808                 tmp = strchr(symbol, '%');
809                 if (tmp) {
810                         if (!strcmp(tmp, "%return")) {
811                                 *tmp = '\0';
812                                 is_return = true;
813                         } else {
814                                 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
815                                 goto parse_error;
816                         }
817                 }
818
819                 /* TODO: support .init module functions */
820                 ret = traceprobe_split_symbol_offset(symbol, &offset);
821                 if (ret || offset < 0 || offset > UINT_MAX) {
822                         trace_probe_log_err(0, BAD_PROBE_ADDR);
823                         goto parse_error;
824                 }
825                 if (is_return)
826                         flags |= TPARG_FL_RETURN;
827                 ret = kprobe_on_func_entry(NULL, symbol, offset);
828                 if (ret == 0)
829                         flags |= TPARG_FL_FENTRY;
830                 /* Defer the ENOENT case until register kprobe */
831                 if (ret == -EINVAL && is_return) {
832                         trace_probe_log_err(0, BAD_RETPROBE);
833                         goto parse_error;
834                 }
835         }
836
837         trace_probe_log_set_index(0);
838         if (event) {
839                 ret = traceprobe_parse_event_name(&event, &group, gbuf,
840                                                   event - argv[0]);
841                 if (ret)
842                         goto parse_error;
843         }
844
845         if (!event) {
846                 /* Make a new event name */
847                 if (symbol)
848                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
849                                  is_return ? 'r' : 'p', symbol, offset);
850                 else
851                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
852                                  is_return ? 'r' : 'p', addr);
853                 sanitize_event_name(buf);
854                 event = buf;
855         }
856
857         /* setup a probe */
858         tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
859                                argc - 2, is_return);
860         if (IS_ERR(tk)) {
861                 ret = PTR_ERR(tk);
862                 /* This must return -ENOMEM, else there is a bug */
863                 WARN_ON_ONCE(ret != -ENOMEM);
864                 goto out;       /* We know tk is not allocated */
865         }
866         argc -= 2; argv += 2;
867
868         /* parse arguments */
869         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
870                 trace_probe_log_set_index(i + 2);
871                 ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
872                 if (ret)
873                         goto error;     /* This can be -ENOMEM */
874         }
875
876         ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
877         ret = traceprobe_set_print_fmt(&tk->tp, ptype);
878         if (ret < 0)
879                 goto error;
880
881         ret = register_trace_kprobe(tk);
882         if (ret) {
883                 trace_probe_log_set_index(1);
884                 if (ret == -EILSEQ)
885                         trace_probe_log_err(0, BAD_INSN_BNDRY);
886                 else if (ret == -ENOENT)
887                         trace_probe_log_err(0, BAD_PROBE_ADDR);
888                 else if (ret != -ENOMEM && ret != -EEXIST)
889                         trace_probe_log_err(0, FAIL_REG_PROBE);
890                 goto error;
891         }
892
893 out:
894         trace_probe_log_clear();
895         kfree(symbol);
896         return ret;
897
898 parse_error:
899         ret = -EINVAL;
900 error:
901         free_trace_kprobe(tk);
902         goto out;
903 }
904
905 static int trace_kprobe_create(const char *raw_command)
906 {
907         return trace_probe_create(raw_command, __trace_kprobe_create);
908 }
909
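/*
 * A leading '-' requests removal of an existing event (for example
 * "-:myprobe" or "-:kprobes/myprobe", the form also generated by
 * kprobe_event_delete() below); anything else is parsed as a new probe
 * definition.
 */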
910 static int create_or_delete_trace_kprobe(const char *raw_command)
911 {
912         int ret;
913
914         if (raw_command[0] == '-')
915                 return dyn_event_release(raw_command, &trace_kprobe_ops);
916
917         ret = trace_kprobe_create(raw_command);
918         return ret == -ECANCELED ? -EINVAL : ret;
919 }
920
921 static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
922 {
923         return create_or_delete_trace_kprobe(cmd->seq.buffer);
924 }
925
926 /**
927  * kprobe_event_cmd_init - Initialize a kprobe event command object
928  * @cmd: A pointer to the dynevent_cmd struct representing the new event
929  * @buf: A pointer to the buffer used to build the command
930  * @maxlen: The length of the buffer passed in @buf
931  *
932  * Initialize a kprobe event command object.  Use this before
933  * calling any of the other kprobe_event functions.
934  */
935 void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
936 {
937         dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
938                           trace_kprobe_run_command);
939 }
940 EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
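
/*
 * The command is generated into a caller-supplied buffer, typically
 * MAX_DYNEVENT_CMD_LEN bytes (see trace_dynevent.h), passed here as
 * @buf/@maxlen.  A fuller usage sketch follows
 * __kprobe_event_gen_cmd_start() below.
 */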
941
942 /**
943  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
944  * @cmd: A pointer to the dynevent_cmd struct representing the new event
945  * @name: The name of the kprobe event
946  * @loc: The location of the kprobe event
947  * @kretprobe: Is this a return probe?
948  * @args: Variable number of arg (pairs), one pair for each field
949  *
950  * NOTE: Users normally won't want to call this function directly, but
951  * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
952  * adds a NULL to the end of the arg list.  If this function is used
953  * directly, make sure the last arg in the variable arg list is NULL.
954  *
955  * Generate a kprobe event command to be executed by
956  * kprobe_event_gen_cmd_end().  This function can be used to generate the
957  * complete command or only the first part of it; in the latter case,
958  * kprobe_event_add_fields() can be used to add more fields following this.
959  *
960  * Unlike synth_event_gen_cmd_start(), @loc must be specified; this
961  * function returns -EINVAL if @loc == NULL.
962  *
963  * Return: 0 if successful, error otherwise.
964  */
965 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
966                                  const char *name, const char *loc, ...)
967 {
968         char buf[MAX_EVENT_NAME_LEN];
969         struct dynevent_arg arg;
970         va_list args;
971         int ret;
972
973         if (cmd->type != DYNEVENT_TYPE_KPROBE)
974                 return -EINVAL;
975
976         if (!loc)
977                 return -EINVAL;
978
979         if (kretprobe)
980                 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
981         else
982                 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
983
984         ret = dynevent_str_add(cmd, buf);
985         if (ret)
986                 return ret;
987
988         dynevent_arg_init(&arg, 0);
989         arg.str = loc;
990         ret = dynevent_arg_add(cmd, &arg, NULL);
991         if (ret)
992                 return ret;
993
994         va_start(args, loc);
995         for (;;) {
996                 const char *field;
997
998                 field = va_arg(args, const char *);
999                 if (!field)
1000                         break;
1001
1002                 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1003                         ret = -EINVAL;
1004                         break;
1005                 }
1006
1007                 arg.str = field;
1008                 ret = dynevent_arg_add(cmd, &arg, NULL);
1009                 if (ret)
1010                         break;
1011         }
1012         va_end(args);
1013
1014         return ret;
1015 }
1016 EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
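
/*
 * A minimal usage sketch (compiled out).  It assumes the
 * kprobe_event_gen_cmd_start()/kprobe_event_gen_cmd_end() wrappers from
 * <linux/trace_events.h> and MAX_DYNEVENT_CMD_LEN from trace_dynevent.h;
 * the function name, the probed symbol and the x86 register fetch args
 * are examples only.
 */
#if 0
static int __init kprobe_event_gen_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	/* Buffer the command string is generated into */
	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	/* Equivalent to "p:kprobes/example_open do_sys_open dfd=%ax filename=%dx" */
	ret = kprobe_event_gen_cmd_start(&cmd, "example_open", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (ret)
		goto out;

	/* Run the generated command, actually creating the event */
	ret = kprobe_event_gen_cmd_end(&cmd);
out:
	kfree(buf);
	return ret;
}
#endif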
1017
1018 /**
1019  * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1020  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1021  * @args: Variable number of arg (pairs), one pair for each field
1022  *
1023  * NOTE: Users normally won't want to call this function directly, but
1024  * rather use the kprobe_event_add_fields() wrapper, which
1025  * automatically adds a NULL to the end of the arg list.  If this
1026  * function is used directly, make sure the last arg in the variable
1027  * arg list is NULL.
1028  *
1029  * Add probe fields to an existing kprobe command using a variable
1030  * list of args.  Fields are added in the same order they're listed.
1031  *
1032  * Return: 0 if successful, error otherwise.
1033  */
1034 int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1035 {
1036         struct dynevent_arg arg;
1037         va_list args;
1038         int ret = 0;
1039
1040         if (cmd->type != DYNEVENT_TYPE_KPROBE)
1041                 return -EINVAL;
1042
1043         dynevent_arg_init(&arg, 0);
1044
1045         va_start(args, cmd);
1046         for (;;) {
1047                 const char *field;
1048
1049                 field = va_arg(args, const char *);
1050                 if (!field)
1051                         break;
1052
1053                 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1054                         ret = -EINVAL;
1055                         break;
1056                 }
1057
1058                 arg.str = field;
1059                 ret = dynevent_arg_add(cmd, &arg, NULL);
1060                 if (ret)
1061                         break;
1062         }
1063         va_end(args);
1064
1065         return ret;
1066 }
1067 EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
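
/*
 * Continuing the sketch following __kprobe_event_gen_cmd_start() above:
 * after kprobe_event_gen_cmd_start() and before kprobe_event_gen_cmd_end(),
 * further fields can be appended through the kprobe_event_add_fields()
 * wrapper, e.g. (illustrative fetch args):
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 */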
1068
1069 /**
1070  * kprobe_event_delete - Delete a kprobe event
1071  * @name: The name of the kprobe event to delete
1072  *
1073  * Delete a kprobe event with the given @name from kernel code rather
1074  * than directly from the command line.
1075  *
1076  * Return: 0 if successful, error otherwise.
1077  */
1078 int kprobe_event_delete(const char *name)
1079 {
1080         char buf[MAX_EVENT_NAME_LEN];
1081
1082         snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1083
1084         return create_or_delete_trace_kprobe(buf);
1085 }
1086 EXPORT_SYMBOL_GPL(kprobe_event_delete);
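
/*
 * For the sketch following __kprobe_event_gen_cmd_start() above,
 * kprobe_event_delete("example_open") would remove the generated event
 * again, just as writing "-:example_open" to the kprobe_events file would.
 */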
1087
1088 static int trace_kprobe_release(struct dyn_event *ev)
1089 {
1090         struct trace_kprobe *tk = to_trace_kprobe(ev);
1091         int ret = unregister_trace_kprobe(tk);
1092
1093         if (!ret)
1094                 free_trace_kprobe(tk);
1095         return ret;
1096 }
1097
1098 static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1099 {
1100         struct trace_kprobe *tk = to_trace_kprobe(ev);
1101         int i;
1102
1103         seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1104         if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1105                 seq_printf(m, "%d", tk->rp.maxactive);
1106         seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1107                                 trace_probe_name(&tk->tp));
1108
1109         if (!tk->symbol)
1110                 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1111         else if (tk->rp.kp.offset)
1112                 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1113                            tk->rp.kp.offset);
1114         else
1115                 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1116
1117         for (i = 0; i < tk->tp.nr_args; i++)
1118                 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1119         seq_putc(m, '\n');
1120
1121         return 0;
1122 }
1123
1124 static int probes_seq_show(struct seq_file *m, void *v)
1125 {
1126         struct dyn_event *ev = v;
1127
1128         if (!is_trace_kprobe(ev))
1129                 return 0;
1130
1131         return trace_kprobe_show(m, ev);
1132 }
1133
1134 static const struct seq_operations probes_seq_op = {
1135         .start  = dyn_event_seq_start,
1136         .next   = dyn_event_seq_next,
1137         .stop   = dyn_event_seq_stop,
1138         .show   = probes_seq_show
1139 };
1140
1141 static int probes_open(struct inode *inode, struct file *file)
1142 {
1143         int ret;
1144
1145         ret = security_locked_down(LOCKDOWN_TRACEFS);
1146         if (ret)
1147                 return ret;
1148
1149         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1150                 ret = dyn_events_release_all(&trace_kprobe_ops);
1151                 if (ret < 0)
1152                         return ret;
1153         }
1154
1155         return seq_open(file, &probes_seq_op);
1156 }
1157
1158 static ssize_t probes_write(struct file *file, const char __user *buffer,
1159                             size_t count, loff_t *ppos)
1160 {
1161         return trace_parse_run_command(file, buffer, count, ppos,
1162                                        create_or_delete_trace_kprobe);
1163 }
1164
1165 static const struct file_operations kprobe_events_ops = {
1166         .owner          = THIS_MODULE,
1167         .open           = probes_open,
1168         .read           = seq_read,
1169         .llseek         = seq_lseek,
1170         .release        = seq_release,
1171         .write          = probes_write,
1172 };
1173
1174 /* Probes profiling interfaces */
1175 static int probes_profile_seq_show(struct seq_file *m, void *v)
1176 {
1177         struct dyn_event *ev = v;
1178         struct trace_kprobe *tk;
1179         unsigned long nmissed;
1180
1181         if (!is_trace_kprobe(ev))
1182                 return 0;
1183
1184         tk = to_trace_kprobe(ev);
1185         nmissed = trace_kprobe_is_return(tk) ?
1186                 tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1187         seq_printf(m, "  %-44s %15lu %15lu\n",
1188                    trace_probe_name(&tk->tp),
1189                    trace_kprobe_nhit(tk),
1190                    nmissed);
1191
1192         return 0;
1193 }
1194
1195 static const struct seq_operations profile_seq_op = {
1196         .start  = dyn_event_seq_start,
1197         .next   = dyn_event_seq_next,
1198         .stop   = dyn_event_seq_stop,
1199         .show   = probes_profile_seq_show
1200 };
1201
1202 static int profile_open(struct inode *inode, struct file *file)
1203 {
1204         int ret;
1205
1206         ret = security_locked_down(LOCKDOWN_TRACEFS);
1207         if (ret)
1208                 return ret;
1209
1210         return seq_open(file, &profile_seq_op);
1211 }
1212
1213 static const struct file_operations kprobe_profile_ops = {
1214         .owner          = THIS_MODULE,
1215         .open           = profile_open,
1216         .read           = seq_read,
1217         .llseek         = seq_lseek,
1218         .release        = seq_release,
1219 };
1220
1221 /* Kprobe specific fetch functions */
1222
1223 /* Return the length of the string -- including the null terminating byte */
1224 static nokprobe_inline int
1225 fetch_store_strlen_user(unsigned long addr)
1226 {
1227         return kern_fetch_store_strlen_user(addr);
1228 }
1229
1230 /* Return the length of the string -- including the null terminating byte */
1231 static nokprobe_inline int
1232 fetch_store_strlen(unsigned long addr)
1233 {
1234         return kern_fetch_store_strlen(addr);
1235 }
1236
1237 /*
1238  * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1239  * with max length and relative data location.
1240  */
1241 static nokprobe_inline int
1242 fetch_store_string_user(unsigned long addr, void *dest, void *base)
1243 {
1244         return kern_fetch_store_string_user(addr, dest, base);
1245 }
1246
1247 /*
1248  * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1249  * length and relative data location.
1250  */
1251 static nokprobe_inline int
1252 fetch_store_string(unsigned long addr, void *dest, void *base)
1253 {
1254         return kern_fetch_store_string(addr, dest, base);
1255 }
1256
1257 static nokprobe_inline int
1258 probe_mem_read_user(void *dest, void *src, size_t size)
1259 {
1260         const void __user *uaddr =  (__force const void __user *)src;
1261
1262         return copy_from_user_nofault(dest, uaddr, size);
1263 }
1264
1265 static nokprobe_inline int
1266 probe_mem_read(void *dest, void *src, size_t size)
1267 {
1268 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1269         if ((unsigned long)src < TASK_SIZE)
1270                 return probe_mem_read_user(dest, src, size);
1271 #endif
1272         return copy_from_kernel_nofault(dest, src, size);
1273 }
1274
1275 /* Note that we don't verify it, since the code does not come from user space */
1276 static int
1277 process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1278                    void *base)
1279 {
1280         struct pt_regs *regs = rec;
1281         unsigned long val;
1282
1283 retry:
1284         /* 1st stage: get value from context */
1285         switch (code->op) {
1286         case FETCH_OP_REG:
1287                 val = regs_get_register(regs, code->param);
1288                 break;
1289         case FETCH_OP_STACK:
1290                 val = regs_get_kernel_stack_nth(regs, code->param);
1291                 break;
1292         case FETCH_OP_STACKP:
1293                 val = kernel_stack_pointer(regs);
1294                 break;
1295         case FETCH_OP_RETVAL:
1296                 val = regs_return_value(regs);
1297                 break;
1298         case FETCH_OP_IMM:
1299                 val = code->immediate;
1300                 break;
1301         case FETCH_OP_COMM:
1302                 val = (unsigned long)current->comm;
1303                 break;
1304         case FETCH_OP_DATA:
1305                 val = (unsigned long)code->data;
1306                 break;
1307 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1308         case FETCH_OP_ARG:
1309                 val = regs_get_kernel_argument(regs, code->param);
1310                 break;
1311 #endif
1312         case FETCH_NOP_SYMBOL:  /* Ignore a place holder */
1313                 code++;
1314                 goto retry;
1315         default:
1316                 return -EILSEQ;
1317         }
1318         code++;
1319
1320         return process_fetch_insn_bottom(code, val, dest, base);
1321 }
1322 NOKPROBE_SYMBOL(process_fetch_insn);
1323
1324 /* Kprobe handler */
1325 static nokprobe_inline void
1326 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1327                     struct trace_event_file *trace_file)
1328 {
1329         struct kprobe_trace_entry_head *entry;
1330         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1331         struct trace_event_buffer fbuffer;
1332         int dsize;
1333
1334         WARN_ON(call != trace_file->event_call);
1335
1336         if (trace_trigger_soft_disabled(trace_file))
1337                 return;
1338
1339         dsize = __get_data_size(&tk->tp, regs);
1340
1341         entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1342                                            sizeof(*entry) + tk->tp.size + dsize);
1343         if (!entry)
1344                 return;
1345
1346         fbuffer.regs = regs;
1347         entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1348         entry->ip = (unsigned long)tk->rp.kp.addr;
1349         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1350
1351         trace_event_buffer_commit(&fbuffer);
1352 }
1353
1354 static void
1355 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1356 {
1357         struct event_file_link *link;
1358
1359         trace_probe_for_each_link_rcu(link, &tk->tp)
1360                 __kprobe_trace_func(tk, regs, link->file);
1361 }
1362 NOKPROBE_SYMBOL(kprobe_trace_func);
1363
1364 /* Kretprobe handler */
1365 static nokprobe_inline void
1366 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1367                        struct pt_regs *regs,
1368                        struct trace_event_file *trace_file)
1369 {
1370         struct kretprobe_trace_entry_head *entry;
1371         struct trace_event_buffer fbuffer;
1372         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1373         int dsize;
1374
1375         WARN_ON(call != trace_file->event_call);
1376
1377         if (trace_trigger_soft_disabled(trace_file))
1378                 return;
1379
1380         dsize = __get_data_size(&tk->tp, regs);
1381
1382         entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1383                                            sizeof(*entry) + tk->tp.size + dsize);
1384         if (!entry)
1385                 return;
1386
1387         fbuffer.regs = regs;
1388         entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1389         entry->func = (unsigned long)tk->rp.kp.addr;
1390         entry->ret_ip = get_kretprobe_retaddr(ri);
1391         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1392
1393         trace_event_buffer_commit(&fbuffer);
1394 }
1395
1396 static void
1397 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1398                      struct pt_regs *regs)
1399 {
1400         struct event_file_link *link;
1401
1402         trace_probe_for_each_link_rcu(link, &tk->tp)
1403                 __kretprobe_trace_func(tk, ri, regs, link->file);
1404 }
1405 NOKPROBE_SYMBOL(kretprobe_trace_func);
1406
1407 /* Event entry printers */
1408 static enum print_line_t
1409 print_kprobe_event(struct trace_iterator *iter, int flags,
1410                    struct trace_event *event)
1411 {
1412         struct kprobe_trace_entry_head *field;
1413         struct trace_seq *s = &iter->seq;
1414         struct trace_probe *tp;
1415
1416         field = (struct kprobe_trace_entry_head *)iter->ent;
1417         tp = trace_probe_primary_from_call(
1418                 container_of(event, struct trace_event_call, event));
1419         if (WARN_ON_ONCE(!tp))
1420                 goto out;
1421
1422         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1423
1424         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1425                 goto out;
1426
1427         trace_seq_putc(s, ')');
1428
1429         if (print_probe_args(s, tp->args, tp->nr_args,
1430                              (u8 *)&field[1], field) < 0)
1431                 goto out;
1432
1433         trace_seq_putc(s, '\n');
1434  out:
1435         return trace_handle_return(s);
1436 }
1437
1438 static enum print_line_t
1439 print_kretprobe_event(struct trace_iterator *iter, int flags,
1440                       struct trace_event *event)
1441 {
1442         struct kretprobe_trace_entry_head *field;
1443         struct trace_seq *s = &iter->seq;
1444         struct trace_probe *tp;
1445
1446         field = (struct kretprobe_trace_entry_head *)iter->ent;
1447         tp = trace_probe_primary_from_call(
1448                 container_of(event, struct trace_event_call, event));
1449         if (WARN_ON_ONCE(!tp))
1450                 goto out;
1451
1452         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1453
1454         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1455                 goto out;
1456
1457         trace_seq_puts(s, " <- ");
1458
1459         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1460                 goto out;
1461
1462         trace_seq_putc(s, ')');
1463
1464         if (print_probe_args(s, tp->args, tp->nr_args,
1465                              (u8 *)&field[1], field) < 0)
1466                 goto out;
1467
1468         trace_seq_putc(s, '\n');
1469
1470  out:
1471         return trace_handle_return(s);
1472 }
1473
1474
1475 static int kprobe_event_define_fields(struct trace_event_call *event_call)
1476 {
1477         int ret;
1478         struct kprobe_trace_entry_head field;
1479         struct trace_probe *tp;
1480
1481         tp = trace_probe_primary_from_call(event_call);
1482         if (WARN_ON_ONCE(!tp))
1483                 return -ENOENT;
1484
1485         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1486
1487         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1488 }
1489
1490 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1491 {
1492         int ret;
1493         struct kretprobe_trace_entry_head field;
1494         struct trace_probe *tp;
1495
1496         tp = trace_probe_primary_from_call(event_call);
1497         if (WARN_ON_ONCE(!tp))
1498                 return -ENOENT;
1499
1500         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1501         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1502
1503         return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1504 }
1505
1506 #ifdef CONFIG_PERF_EVENTS
1507
1508 /* Kprobe profile handler */
1509 static int
1510 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1511 {
1512         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1513         struct kprobe_trace_entry_head *entry;
1514         struct hlist_head *head;
1515         int size, __size, dsize;
1516         int rctx;
1517
1518         if (bpf_prog_array_valid(call)) {
1519                 unsigned long orig_ip = instruction_pointer(regs);
1520                 int ret;
1521
1522                 ret = trace_call_bpf(call, regs);
1523
1524                 /*
1525                  * We need to check and see if we modified the pc of the
1526                  * pt_regs, and if so return 1 so that we don't do the
1527                  * single stepping.
1528                  */
1529                 if (orig_ip != instruction_pointer(regs))
1530                         return 1;
1531                 if (!ret)
1532                         return 0;
1533         }
1534
1535         head = this_cpu_ptr(call->perf_events);
1536         if (hlist_empty(head))
1537                 return 0;
1538
1539         dsize = __get_data_size(&tk->tp, regs);
1540         __size = sizeof(*entry) + tk->tp.size + dsize;
1541         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1542         size -= sizeof(u32);
1543
1544         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1545         if (!entry)
1546                 return 0;
1547
1548         entry->ip = (unsigned long)tk->rp.kp.addr;
1549         memset(&entry[1], 0, dsize);
1550         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1551         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1552                               head, NULL);
1553         return 0;
1554 }
1555 NOKPROBE_SYMBOL(kprobe_perf_func);
1556
1557 /* Kretprobe profile handler */
1558 static void
1559 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1560                     struct pt_regs *regs)
1561 {
1562         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1563         struct kretprobe_trace_entry_head *entry;
1564         struct hlist_head *head;
1565         int size, __size, dsize;
1566         int rctx;
1567
1568         if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1569                 return;
1570
1571         head = this_cpu_ptr(call->perf_events);
1572         if (hlist_empty(head))
1573                 return;
1574
1575         dsize = __get_data_size(&tk->tp, regs);
1576         __size = sizeof(*entry) + tk->tp.size + dsize;
1577         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1578         size -= sizeof(u32);
1579
1580         entry = perf_trace_buf_alloc(size, NULL, &rctx);
1581         if (!entry)
1582                 return;
1583
1584         entry->func = (unsigned long)tk->rp.kp.addr;
1585         entry->ret_ip = get_kretprobe_retaddr(ri);
1586         store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1587         perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1588                               head, NULL);
1589 }
1590 NOKPROBE_SYMBOL(kretprobe_perf_func);
1591
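/*
 * Look up the trace_kprobe behind a perf event for BPF introspection:
 * report whether it is an entry or return probe, and either the symbol
 * plus offset or the raw probe address.
 */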
1592 int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1593                         const char **symbol, u64 *probe_offset,
1594                         u64 *probe_addr, bool perf_type_tracepoint)
1595 {
1596         const char *pevent = trace_event_name(event->tp_event);
1597         const char *group = event->tp_event->class->system;
1598         struct trace_kprobe *tk;
1599
1600         if (perf_type_tracepoint)
1601                 tk = find_trace_kprobe(pevent, group);
1602         else
1603                 tk = trace_kprobe_primary_from_call(event->tp_event);
1604         if (!tk)
1605                 return -EINVAL;
1606
1607         *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1608                                               : BPF_FD_TYPE_KPROBE;
1609         if (tk->symbol) {
1610                 *symbol = tk->symbol;
1611                 *probe_offset = tk->rp.kp.offset;
1612                 *probe_addr = 0;
1613         } else {
1614                 *symbol = NULL;
1615                 *probe_offset = 0;
1616                 *probe_addr = (unsigned long)tk->rp.kp.addr;
1617         }
1618         return 0;
1619 }
1620 #endif  /* CONFIG_PERF_EVENTS */
1621
1622 /*
1623  * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1624  *
1625  * kprobe_trace_self_tests_init() calls enable_trace_kprobe()/disable_trace_kprobe()
1626  * locklessly, but we can't race with this __init function.
1627  */
1628 static int kprobe_register(struct trace_event_call *event,
1629                            enum trace_reg type, void *data)
1630 {
1631         struct trace_event_file *file = data;
1632
1633         switch (type) {
1634         case TRACE_REG_REGISTER:
1635                 return enable_trace_kprobe(event, file);
1636         case TRACE_REG_UNREGISTER:
1637                 return disable_trace_kprobe(event, file);
1638
1639 #ifdef CONFIG_PERF_EVENTS
1640         case TRACE_REG_PERF_REGISTER:
1641                 return enable_trace_kprobe(event, NULL);
1642         case TRACE_REG_PERF_UNREGISTER:
1643                 return disable_trace_kprobe(event, NULL);
1644         case TRACE_REG_PERF_OPEN:
1645         case TRACE_REG_PERF_CLOSE:
1646         case TRACE_REG_PERF_ADD:
1647         case TRACE_REG_PERF_DEL:
1648                 return 0;
1649 #endif
1650         }
1651         return 0;
1652 }
1653
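/* Kprobe pre-handler: bump the hit count and fan out to the trace and/or perf handlers. */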
1654 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1655 {
1656         struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1657         int ret = 0;
1658
1659         raw_cpu_inc(*tk->nhit);
1660
1661         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1662                 kprobe_trace_func(tk, regs);
1663 #ifdef CONFIG_PERF_EVENTS
1664         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1665                 ret = kprobe_perf_func(tk, regs);
1666 #endif
1667         return ret;
1668 }
1669 NOKPROBE_SYMBOL(kprobe_dispatcher);
1670
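/* Kretprobe handler: resolve the owning trace_kprobe, count the hit and fan out to the trace and/or perf handlers. */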
1671 static int
1672 kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1673 {
1674         struct kretprobe *rp = get_kretprobe(ri);
1675         struct trace_kprobe *tk;
1676
1677         /*
1678          * There is a small chance that get_kretprobe(ri) returns NULL when
1679          * the kretprobe is unregistered on another CPU between the kretprobe's
1680          * trampoline handler and this function.
1681          */
1682         if (unlikely(!rp))
1683                 return 0;
1684
1685         tk = container_of(rp, struct trace_kprobe, rp);
1686         raw_cpu_inc(*tk->nhit);
1687
1688         if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1689                 kretprobe_trace_func(tk, ri, regs);
1690 #ifdef CONFIG_PERF_EVENTS
1691         if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1692                 kretprobe_perf_func(tk, ri, regs);
1693 #endif
1694         return 0;       /* We don't tweak the kernel, so just return 0 */
1695 }
1696 NOKPROBE_SYMBOL(kretprobe_dispatcher);
1697
1698 static struct trace_event_functions kretprobe_funcs = {
1699         .trace          = print_kretprobe_event
1700 };
1701
1702 static struct trace_event_functions kprobe_funcs = {
1703         .trace          = print_kprobe_event
1704 };
1705
1706 static struct trace_event_fields kretprobe_fields_array[] = {
1707         { .type = TRACE_FUNCTION_TYPE,
1708           .define_fields = kretprobe_event_define_fields },
1709         {}
1710 };
1711
1712 static struct trace_event_fields kprobe_fields_array[] = {
1713         { .type = TRACE_FUNCTION_TYPE,
1714           .define_fields = kprobe_event_define_fields },
1715         {}
1716 };
1717
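/*
 * Wire up the print callbacks, field definitions and registration hook for
 * this probe's trace_event_call, picking the kprobe or kretprobe variants.
 */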
1718 static inline void init_trace_event_call(struct trace_kprobe *tk)
1719 {
1720         struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1721
1722         if (trace_kprobe_is_return(tk)) {
1723                 call->event.funcs = &kretprobe_funcs;
1724                 call->class->fields_array = kretprobe_fields_array;
1725         } else {
1726                 call->event.funcs = &kprobe_funcs;
1727                 call->class->fields_array = kprobe_fields_array;
1728         }
1729
1730         call->flags = TRACE_EVENT_FL_KPROBE;
1731         call->class->reg = kprobe_register;
1732 }
1733
1734 static int register_kprobe_event(struct trace_kprobe *tk)
1735 {
1736         init_trace_event_call(tk);
1737
1738         return trace_probe_register_event_call(&tk->tp);
1739 }
1740
1741 static int unregister_kprobe_event(struct trace_kprobe *tk)
1742 {
1743         return trace_probe_unregister_event_call(&tk->tp);
1744 }
1745
1746 #ifdef CONFIG_PERF_EVENTS
1747 /* create a trace_kprobe, but don't add it to global lists */
1748 struct trace_event_call *
1749 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1750                           bool is_return)
1751 {
1752         enum probe_print_type ptype;
1753         struct trace_kprobe *tk;
1754         int ret;
1755         char *event;
1756
1757         /*
1758          * Local trace_kprobes are not added to dyn_event, so they are never
1759          * found by find_trace_kprobe(). Therefore, there is no concern about
1760          * duplicated names here.
1761          */
1762         event = func ? func : "DUMMY_EVENT";
1763
1764         tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1765                                 offs, 0 /* maxactive */, 0 /* nargs */,
1766                                 is_return);
1767
1768         if (IS_ERR(tk)) {
1769                 pr_info("Failed to allocate trace_probe.(%d)\n",
1770                         (int)PTR_ERR(tk));
1771                 return ERR_CAST(tk);
1772         }
1773
1774         init_trace_event_call(tk);
1775
1776         ptype = trace_kprobe_is_return(tk) ?
1777                 PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1778         if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1779                 ret = -ENOMEM;
1780                 goto error;
1781         }
1782
1783         ret = __register_trace_kprobe(tk);
1784         if (ret < 0)
1785                 goto error;
1786
1787         return trace_probe_event_call(&tk->tp);
1788 error:
1789         free_trace_kprobe(tk);
1790         return ERR_PTR(ret);
1791 }
1792
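/* Tear down a probe created by create_local_trace_kprobe(); it must already be disabled. */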
1793 void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1794 {
1795         struct trace_kprobe *tk;
1796
1797         tk = trace_kprobe_primary_from_call(event_call);
1798         if (unlikely(!tk))
1799                 return;
1800
1801         if (trace_probe_is_enabled(&tk->tp)) {
1802                 WARN_ON(1);
1803                 return;
1804         }
1805
1806         __unregister_trace_kprobe(tk);
1807
1808         free_trace_kprobe(tk);
1809 }
1810 #endif /* CONFIG_PERF_EVENTS */
1811
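/* Enable, in the top-level trace instance, the kprobe events created so far (i.e. those from the boot command line). */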
1812 static __init void enable_boot_kprobe_events(void)
1813 {
1814         struct trace_array *tr = top_trace_array();
1815         struct trace_event_file *file;
1816         struct trace_kprobe *tk;
1817         struct dyn_event *pos;
1818
1819         mutex_lock(&event_mutex);
1820         for_each_trace_kprobe(tk, pos) {
1821                 list_for_each_entry(file, &tr->events, list)
1822                         if (file->event_call == trace_probe_event_call(&tk->tp))
1823                                 trace_event_enable_disable(file, 1, 0);
1824         }
1825         mutex_unlock(&event_mutex);
1826 }
1827
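/*
 * Create the events passed via the kprobe_event= boot parameter. Commas
 * stand in for spaces and ';' separates multiple definitions, so e.g.
 * "p,vfs_read,$arg1" becomes the command "p vfs_read $arg1".
 */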
1828 static __init void setup_boot_kprobe_events(void)
1829 {
1830         char *p, *cmd = kprobe_boot_events_buf;
1831         int ret;
1832
1833         strreplace(kprobe_boot_events_buf, ',', ' ');
1834
1835         while (cmd && *cmd != '\0') {
1836                 p = strchr(cmd, ';');
1837                 if (p)
1838                         *p++ = '\0';
1839
1840                 ret = create_or_delete_trace_kprobe(cmd);
1841                 if (ret)
1842                         pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1843
1844                 cmd = p;
1845         }
1846
1847         enable_boot_kprobe_events();
1848 }
1849
1850 /*
1851  * Register dynevent at core_initcall. This allows the kernel to set up
1852  * kprobe events in postcore_initcall without tracefs.
1853  */
1854 static __init int init_kprobe_trace_early(void)
1855 {
1856         int ret;
1857
1858         ret = dyn_event_register(&trace_kprobe_ops);
1859         if (ret)
1860                 return ret;
1861
1862         if (register_module_notifier(&trace_kprobe_module_nb))
1863                 return -EINVAL;
1864
1865         return 0;
1866 }
1867 core_initcall(init_kprobe_trace_early);
1868
1869 /* Make a tracefs interface for controlling probe points */
1870 static __init int init_kprobe_trace(void)
1871 {
1872         int ret;
1873
1874         ret = tracing_init_dentry();
1875         if (ret)
1876                 return 0;
1877
1878         /* Event list interface */
1879         trace_create_file("kprobe_events", TRACE_MODE_WRITE,
1880                           NULL, NULL, &kprobe_events_ops);
1881
1882         /* Profile interface */
1883         trace_create_file("kprobe_profile", TRACE_MODE_READ,
1884                           NULL, NULL, &kprobe_profile_ops);
1885
1886         setup_boot_kprobe_events();
1887
1888         return 0;
1889 }
1890 fs_initcall(init_kprobe_trace);
1891
1892
1893 #ifdef CONFIG_FTRACE_STARTUP_TEST
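/* Find the trace_event_file that backs this probe in the given trace array. */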
1894 static __init struct trace_event_file *
1895 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1896 {
1897         struct trace_event_file *file;
1898
1899         list_for_each_entry(file, &tr->events, list)
1900                 if (file->event_call == trace_probe_event_call(&tk->tp))
1901                         return file;
1902
1903         return NULL;
1904 }
1905
1906 /*
1907  * Nobody but us can call enable_trace_kprobe()/disable_trace_kprobe() at
1908  * this stage, so we can do this locklessly.
1909  */
1910 static __init int kprobe_trace_self_tests_init(void)
1911 {
1912         int ret, warn = 0;
1913         int (*target)(int, int, int, int, int, int);
1914         struct trace_kprobe *tk;
1915         struct trace_event_file *file;
1916
1917         if (tracing_is_disabled())
1918                 return -ENODEV;
1919
1920         if (tracing_selftest_disabled)
1921                 return 0;
1922
1923         target = kprobe_trace_selftest_target;
1924
1925         pr_info("Testing kprobe tracing: ");
1926
1927         ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1928         if (WARN_ON_ONCE(ret)) {
1929                 pr_warn("error on probing function entry.\n");
1930                 warn++;
1931         } else {
1932                 /* Enable trace point */
1933                 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1934                 if (WARN_ON_ONCE(tk == NULL)) {
1935                         pr_warn("error on getting new probe.\n");
1936                         warn++;
1937                 } else {
1938                         file = find_trace_probe_file(tk, top_trace_array());
1939                         if (WARN_ON_ONCE(file == NULL)) {
1940                                 pr_warn("error on getting probe file.\n");
1941                                 warn++;
1942                         } else
1943                                 enable_trace_kprobe(
1944                                         trace_probe_event_call(&tk->tp), file);
1945                 }
1946         }
1947
1948         ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
1949         if (WARN_ON_ONCE(ret)) {
1950                 pr_warn("error on probing function return.\n");
1951                 warn++;
1952         } else {
1953                 /* Enable trace point */
1954                 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1955                 if (WARN_ON_ONCE(tk == NULL)) {
1956                         pr_warn("error on getting 2nd new probe.\n");
1957                         warn++;
1958                 } else {
1959                         file = find_trace_probe_file(tk, top_trace_array());
1960                         if (WARN_ON_ONCE(file == NULL)) {
1961                                 pr_warn("error on getting probe file.\n");
1962                                 warn++;
1963                         } else
1964                                 enable_trace_kprobe(
1965                                         trace_probe_event_call(&tk->tp), file);
1966                 }
1967         }
1968
1969         if (warn)
1970                 goto end;
1971
1972         ret = target(1, 2, 3, 4, 5, 6);
1973
1974         /*
1975          * No error is expected here; the check only keeps the optimizer from
1976          * removing the call to target(), which would otherwise have no
1977          * side effects and never be performed.
1978          */
1979         if (ret != 21)
1980                 warn++;
1981
1982         /* Disable the trace points before removing them */
1983         tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1984         if (WARN_ON_ONCE(tk == NULL)) {
1985                 pr_warn("error on getting test probe.\n");
1986                 warn++;
1987         } else {
1988                 if (trace_kprobe_nhit(tk) != 1) {
1989                         pr_warn("incorrect number of testprobe hits\n");
1990                         warn++;
1991                 }
1992
1993                 file = find_trace_probe_file(tk, top_trace_array());
1994                 if (WARN_ON_ONCE(file == NULL)) {
1995                         pr_warn("error on getting probe file.\n");
1996                         warn++;
1997                 } else
1998                         disable_trace_kprobe(
1999                                 trace_probe_event_call(&tk->tp), file);
2000         }
2001
2002         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2003         if (WARN_ON_ONCE(tk == NULL)) {
2004                 pr_warn("error on getting 2nd test probe.\n");
2005                 warn++;
2006         } else {
2007                 if (trace_kprobe_nhit(tk) != 1) {
2008                         pr_warn("incorrect number of testprobe2 hits\n");
2009                         warn++;
2010                 }
2011
2012                 file = find_trace_probe_file(tk, top_trace_array());
2013                 if (WARN_ON_ONCE(file == NULL)) {
2014                         pr_warn("error on getting probe file.\n");
2015                         warn++;
2016                 } else
2017                         disable_trace_kprobe(
2018                                 trace_probe_event_call(&tk->tp), file);
2019         }
2020
2021         ret = create_or_delete_trace_kprobe("-:testprobe");
2022         if (WARN_ON_ONCE(ret)) {
2023                 pr_warn("error on deleting a probe.\n");
2024                 warn++;
2025         }
2026
2027         ret = create_or_delete_trace_kprobe("-:testprobe2");
2028         if (WARN_ON_ONCE(ret)) {
2029                 pr_warn("error on deleting a probe.\n");
2030                 warn++;
2031         }
2032
2033 end:
2034         ret = dyn_events_release_all(&trace_kprobe_ops);
2035         if (WARN_ON_ONCE(ret)) {
2036                 pr_warn("error on cleaning up probes.\n");
2037                 warn++;
2038         }
2039         /*
2040          * Wait for the optimizer work to finish. Otherwise it might fiddle
2041          * with probes in already freed __init text.
2042          */
2043         wait_for_kprobe_optimizer();
2044         if (warn)
2045                 pr_cont("NG: Some tests failed. Please check them.\n");
2046         else
2047                 pr_cont("OK\n");
2048         return 0;
2049 }
2050
2051 late_initcall(kprobe_trace_self_tests_init);
2052
2053 #endif