// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return --system->ref_count;
}

/* Double loops, do not use break, only gotos work */
#define do_for_each_event_file(tr, file)                        \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)                   \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                struct trace_event_file *___n;                          \
                list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()             \
        }
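
/*
 * Usage sketch for the macros above; a break would only exit the inner
 * loop, so early exit must use a goto (some_condition() is a placeholder):
 *
 *      do_for_each_event_file(tr, file) {
 *              if (some_condition(file))
 *                      goto out;
 *      } while_for_each_event_file();
 *  out:
 */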

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        head = trace_get_fields(call);
        field = __find_event_field(head, name);
        if (field)
                return field;

        field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;

        return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

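/*
 * Register a single field of an event for format output and filtering.
 * Note that __trace_define_field() prepends via list_add(), so the field
 * list is ordered newest-first and the oldest field sits at the tail.
 */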
int trace_define_field(struct trace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)                        \
        ret = __trace_define_field(&ftrace_generic_fields, #type,       \
                                   #item, 0, 0, is_signed_type(type),   \
                                   filter_type);                        \
        if (ret)                                                        \
                return ret;

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_generic_fields(void)
{
        int ret;

        __generic_field(int, CPU, FILTER_CPU);
        __generic_field(int, cpu, FILTER_CPU);
        __generic_field(char *, COMM, FILTER_COMM);
        __generic_field(char *, comm, FILTER_COMM);

        return ret;
}

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
        struct ftrace_event_field *tail;
        struct list_head *head;

        head = trace_get_fields(call);
        /*
         * head->next points to the last field with the largest offset,
         * since it was added last by trace_define_field()
         */
        tail = list_first_entry(head, struct ftrace_event_field, link);
        return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
        int id;

        id = register_trace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

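/*
 * Return true if the event hitting on this CPU should be dropped because
 * of "set_event_pid" filtering. The per-CPU ignore_pid flag is kept up to
 * date by the sched_switch/sched_wakeup probes registered further down.
 */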
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
        struct trace_array *tr = trace_file->tr;
        struct trace_array_cpu *data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_raw(tr->filtered_pids);
        if (!pid_list)
                return false;

        data = this_cpu_ptr(tr->trace_buffer.data);

        return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

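/*
 * Reserve ring buffer space for an event, honoring PID filtering and
 * capturing the irq flags and preempt count at the time the tracepoint
 * was hit. Returns the event's data area, or NULL if the event is
 * filtered out or the reservation fails.
 */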
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len)
{
        struct trace_event_call *event_call = trace_file->event_call;

        if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
            trace_event_ignore_this_pid(trace_file))
                return NULL;

        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        /*
         * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
         * preemption (adding one to the preempt_count). Since we are
         * interested in the preempt_count at the time the tracepoint was
         * hit, we need to subtract one to offset the increment.
         */
        if (IS_ENABLED(CONFIG_PREEMPTION))
                fbuffer->pc--;
        fbuffer->trace_file = trace_file;

        fbuffer->event =
                trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
                return NULL;

        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

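/*
 * Default ->reg() callback for tracepoint-based events: map the generic
 * register/unregister requests (from ftrace and perf) onto probe
 * registration on the event's tracepoint.
 */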
int trace_event_reg(struct trace_event_call *call,
                    enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
        struct trace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

void trace_event_enable_tgid_record(bool enable)
{
        struct trace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {
                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_tgid_record();
                        set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
                } else {
                        tracing_stop_tgid_record();
                        clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
                                  &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

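/*
 * Common helper to enable (1) or disable (0) an event file. When
 * @soft_disable is set, the request comes from a "soft" user (for example
 * an event trigger); such users are counted in file->sm_ref so the event
 * stays registered while any of them still needs it.
 */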
static int __ftrace_event_enable_disable(struct trace_event_file *file,
                                         int enable, int soft_disable)
{
        struct trace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        unsigned long file_flags = file->flags;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
                 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
                 */
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
                        disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
                        clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                } else
                        disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

                if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
                        clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
                        if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }

                        if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
                                tracing_stop_tgid_record();
                                clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
                        }

                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
                if (file->flags & EVENT_FILE_FL_SOFT_MODE)
                        set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
                 * When soft_disable is set and enable is set, we want to
                 * register the tracepoint for the event, but leave the event
                 * as is. That means, if the event was already enabled, we do
                 * nothing (but set SOFT_MODE). If the event is disabled, we
                 * set SOFT_DISABLED before enabling the event tracepoint, so
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
                        set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                }

                if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
                        bool cmd = false, tgid = false;

                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
                                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

                        if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
                                cmd = true;
                                tracing_start_cmdline_record();
                                set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }

                        if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
                                tgid = true;
                                tracing_start_tgid_record();
                                set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
                        }

                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                if (cmd)
                                        tracing_stop_cmdline_record();
                                if (tgid)
                                        tracing_stop_tgid_record();
                                pr_info("event trace: Could not enable event %s\n",
                                        trace_event_name(call));
                                break;
                        }
                        set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
                }
                break;
        }

        /* Enable or disable use of trace_buffered_event */
        if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
            (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
                if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
                        trace_buffered_event_enable();
                else
                        trace_buffered_event_disable();
        }

        return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
                               int enable, int soft_disable)
{
        return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
        struct trace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = data;

        pid_list = rcu_dereference_raw(tr->filtered_pids);
        trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
                                    struct task_struct *self,
                                    struct task_struct *task)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = data;

        pid_list = rcu_dereference_sched(tr->filtered_pids);
        trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
        if (enable) {
                register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
                                                       tr, INT_MIN);
                register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
                                                       tr, INT_MAX);
        } else {
                unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
                                                    tr);
                unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
                                                    tr);
        }
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, prev) &&
                       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are already tracing */
        if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are not tracing */
        if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        /* Set tracing if current is enabled */
        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        int cpu;

        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
        if (!pid_list)
                return;

        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

        list_for_each_entry(file, &tr->events, list) {
                clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        for_each_possible_cpu(cpu)
                per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

        rcu_assign_pointer(tr->filtered_pids, NULL);

        /* Wait till all users are no longer using pid filtering */
        tracepoint_synchronize_unregister();

        trace_free_pid_list(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
        mutex_lock(&event_mutex);
        __ftrace_clear_event_pids(tr);
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree_const(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                tracefs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void remove_event_file_dir(struct trace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (d_really_is_positive(child))        /* probably unneeded */
                                d_inode(child)->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                tracefs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct trace_event_file *file;
        struct trace_event_call *call;
        const char *name;
        int ret = -EINVAL;
        int eret = 0;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = trace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, name) != 0)
                        continue;

                ret = ftrace_event_enable_disable(file, set);

                /*
                 * Save the first error and return that. Some events
                 * may still have been enabled, but let the user
                 * know that something went wrong.
                 */
                if (ret && !eret)
                        eret = ret;

                ret = eret;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;
        int ret;

        if (!tr)
                return -ENOENT;
        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */
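        /*
         * For example:
         *   "sched:sched_switch" enables just the sched_switch event,
         *   "sched:" enables every event in the sched subsystem, and
         *   "sched_switch" enables any event or subsystem by that name.
         */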

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        ret = __ftrace_set_clr_event(tr, match, sub, event, set);

        /* Put back the colon to allow this to be called again */
        if (buf)
                *(buf - 1) = ':';

        return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        if (!tr)
                return -ENODEV;

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
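
/*
 * Usage sketch: a caller elsewhere in the kernel could enable the
 * sched_switch event with:
 *
 *      if (trace_set_clr_event("sched", "sched_switch", 1))
 *              pr_warn("failed to enable sched_switch\n");
 */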

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
                const char *event, bool enable)
{
        int set;

        if (!tr)
                return -ENOENT;

        set = (enable == true) ? 1 : 0;
        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg &&
                    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & EVENT_FILE_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", trace_event_name(call));

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

        return trace_pid_next(pid_list, v, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
        __acquires(RCU)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = m->private;

        /*
         * Grab the mutex, to keep calls to p_next() having the same
         * tr->filtered_pids as p_start() has.
         * If we just passed the tr->filtered_pids around, then RCU would
         * have been enough, but doing that makes things more complex.
         */
        mutex_lock(&event_mutex);
        rcu_read_lock_sched();

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        if (!pid_list)
                return NULL;

        return trace_pid_start(pid_list, pos);
}

static void p_stop(struct seq_file *m, void *p)
        __releases(RCU)
{
        rcu_read_unlock_sched();
        mutex_unlock(&event_mutex);
}

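/*
 * Read an event's "enable" file: "1" if the event is enabled, "0"
 * otherwise, with a '*' appended while the event is in soft mode or
 * soft disabled.
 */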
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long flags;
        char buf[4] = "0";

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & EVENT_FILE_FL_ENABLED &&
            !(flags & EVENT_FILE_FL_SOFT_DISABLED))
                strcpy(buf, "1");

        if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
            flags & EVENT_FILE_FL_SOFT_MODE)
                strcat(buf, "*");

        strcat(buf, "\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

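/*
 * Read a subsystem's "enable" file. @set accumulates two bits: bit 0 when
 * some event is disabled, bit 1 when some event is enabled. set_to_char
 * maps that to '?' (no events), '0' (all disabled), '1' (all enabled) or
 * 'X' (a mixture).
 */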
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_event_call *call;
        struct trace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!trace_event_name(call) || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * if all events are cleared, or if we have a mixture.
                 */
                set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

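/*
 * Iterator state for an event's "format" file. The sequence is: header
 * (name and ID), the common fields, a field separator, the event-specific
 * fields, and finally the print format string. f_next() walks the field
 * lists via node->prev because trace_define_field() prepends new fields,
 * so prev order is definition order.
 */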
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                node = common_head;
                break;

        case FORMAT_FIELD_SEPERATOR:
                node = head;
                break;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        node = node->prev;
        if (node == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (node == head)
                return (void *)FORMAT_PRINTFMT;
        else
                return node;
}

static int f_show(struct seq_file *m, void *v)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", trace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_puts(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = list_entry(v, struct ftrace_event_field, link);
        /*
         * Smartly shows the array type (except dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (str_has_prefix(field->type, "__data_loc"))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        while (l < *pos && p)
                p = f_next(m, p, &l);

        return p;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        /* Do we want to hide event format files on tracefs lockdown? */

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        char buf[32];
        int len;

        if (unlikely(!id))
                return -ENODEV;

        len = sprintf(buf, "%d\n", id);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                print_event_filter(file, s);
        mutex_unlock(&event_mutex);

        if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);

        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

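/*
 * Open a subsystem file. The subsystem could be released while the file
 * is held open, so look the dir up again under the locks and pin both the
 * subsystem dir and the trace array before handing the file out.
 */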
static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        /* Make sure the system still exists */
        mutex_lock(&event_mutex);
        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&trace_types_lock);
        mutex_unlock(&event_mutex);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct trace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir)
                return -ENOMEM;

        ret = tracing_open_generic_tr(inode, filp);
        if (ret < 0) {
                kfree(dir);
                return ret;
        }
        dir->tr = tr;
        filp->private_data = dir;

        return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct trace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        err = apply_subsystem_event_filter(dir, buf);
        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static void ignore_task_cpu(void *data)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /*
         * This function is called by on_each_cpu() while the
         * event_mutex is held.
         */
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             mutex_is_locked(&event_mutex));

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       trace_ignore_this_task(pid_list, current));
}

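/*
 * Write handler for "set_event_pid": parse the user-supplied PID list,
 * publish it as the new filtered_pids, and on first use register the
 * sched probes that keep each CPU's ignore_pid flag up to date.
 */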
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        struct seq_file *m = filp->private_data;
        struct trace_array *tr = m->private;
        struct trace_pid_list *filtered_pids = NULL;
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        ssize_t ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        mutex_lock(&event_mutex);

        filtered_pids = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));

        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
        if (ret < 0)
                goto out;

        rcu_assign_pointer(tr->filtered_pids, pid_list);

        list_for_each_entry(file, &tr->events, list) {
                set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        if (filtered_pids) {
                tracepoint_synchronize_unregister();
                trace_free_pid_list(filtered_pids);
        } else if (pid_list) {
                /*
                 * Register a probe that is called before all other probes
                 * to set ignore_pid if next or prev do not match.
1641                  * Register a probe that is called after all other probes
1642                  * to only keep ignore_pid set if next pid matches.
1643                  */
1644                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1645                                                  tr, INT_MAX);
1646                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1647                                                  tr, 0);
1648
1649                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1650                                                  tr, INT_MAX);
1651                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1652                                                  tr, 0);
1653
1654                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1655                                                      tr, INT_MAX);
1656                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1657                                                      tr, 0);
1658
1659                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1660                                                  tr, INT_MAX);
1661                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1662                                                  tr, 0);
1663         }
1664
1665         /*
1666          * Ignoring of pids is done at task switch. But we have to
1667          * check for those tasks that are currently running.
1668          * Always do this in case a pid was appended or removed.
1669          */
1670         on_each_cpu(ignore_task_cpu, tr, 1);
1671
1672  out:
1673         mutex_unlock(&event_mutex);
1674
1675         if (ret > 0)
1676                 *ppos += ret;
1677
1678         return ret;
1679 }
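
/*
 * Illustrative usage (not part of the original source): PIDs written
 * to set_event_pid limit all events in this instance to those tasks.
 * Opening with truncation ('>') clears the list first, while '>>'
 * appends to it:
 *
 *   # echo 123 > /sys/kernel/tracing/set_event_pid
 *   # echo 456 >> /sys/kernel/tracing/set_event_pid
 */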
1680
1681 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1682 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1683 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1684 static int ftrace_event_release(struct inode *inode, struct file *file);
1685
1686 static const struct seq_operations show_event_seq_ops = {
1687         .start = t_start,
1688         .next = t_next,
1689         .show = t_show,
1690         .stop = t_stop,
1691 };
1692
1693 static const struct seq_operations show_set_event_seq_ops = {
1694         .start = s_start,
1695         .next = s_next,
1696         .show = t_show,
1697         .stop = t_stop,
1698 };
1699
1700 static const struct seq_operations show_set_pid_seq_ops = {
1701         .start = p_start,
1702         .next = p_next,
1703         .show = trace_pid_show,
1704         .stop = p_stop,
1705 };
1706
1707 static const struct file_operations ftrace_avail_fops = {
1708         .open = ftrace_event_avail_open,
1709         .read = seq_read,
1710         .llseek = seq_lseek,
1711         .release = seq_release,
1712 };
1713
1714 static const struct file_operations ftrace_set_event_fops = {
1715         .open = ftrace_event_set_open,
1716         .read = seq_read,
1717         .write = ftrace_event_write,
1718         .llseek = seq_lseek,
1719         .release = ftrace_event_release,
1720 };
1721
1722 static const struct file_operations ftrace_set_event_pid_fops = {
1723         .open = ftrace_event_set_pid_open,
1724         .read = seq_read,
1725         .write = ftrace_event_pid_write,
1726         .llseek = seq_lseek,
1727         .release = ftrace_event_release,
1728 };
1729
1730 static const struct file_operations ftrace_enable_fops = {
1731         .open = tracing_open_generic,
1732         .read = event_enable_read,
1733         .write = event_enable_write,
1734         .llseek = default_llseek,
1735 };
1736
1737 static const struct file_operations ftrace_event_format_fops = {
1738         .open = trace_format_open,
1739         .read = seq_read,
1740         .llseek = seq_lseek,
1741         .release = seq_release,
1742 };
1743
1744 static const struct file_operations ftrace_event_id_fops = {
1745         .read = event_id_read,
1746         .llseek = default_llseek,
1747 };
1748
1749 static const struct file_operations ftrace_event_filter_fops = {
1750         .open = tracing_open_generic,
1751         .read = event_filter_read,
1752         .write = event_filter_write,
1753         .llseek = default_llseek,
1754 };
1755
1756 static const struct file_operations ftrace_subsystem_filter_fops = {
1757         .open = subsystem_open,
1758         .read = subsystem_filter_read,
1759         .write = subsystem_filter_write,
1760         .llseek = default_llseek,
1761         .release = subsystem_release,
1762 };
1763
1764 static const struct file_operations ftrace_system_enable_fops = {
1765         .open = subsystem_open,
1766         .read = system_enable_read,
1767         .write = system_enable_write,
1768         .llseek = default_llseek,
1769         .release = subsystem_release,
1770 };
1771
1772 static const struct file_operations ftrace_tr_enable_fops = {
1773         .open = system_tr_open,
1774         .read = system_enable_read,
1775         .write = system_enable_write,
1776         .llseek = default_llseek,
1777         .release = subsystem_release,
1778 };
1779
1780 static const struct file_operations ftrace_show_header_fops = {
1781         .open = tracing_open_generic,
1782         .read = show_header,
1783         .llseek = default_llseek,
1784 };
1785
1786 static int
1787 ftrace_event_open(struct inode *inode, struct file *file,
1788                   const struct seq_operations *seq_ops)
1789 {
1790         struct seq_file *m;
1791         int ret;
1792
1793         ret = security_locked_down(LOCKDOWN_TRACEFS);
1794         if (ret)
1795                 return ret;
1796
1797         ret = seq_open(file, seq_ops);
1798         if (ret < 0)
1799                 return ret;
1800         m = file->private_data;
1801         /* copy tr over to seq ops */
1802         m->private = inode->i_private;
1803
1804         return ret;
1805 }
1806
1807 static int ftrace_event_release(struct inode *inode, struct file *file)
1808 {
1809         struct trace_array *tr = inode->i_private;
1810
1811         trace_array_put(tr);
1812
1813         return seq_release(inode, file);
1814 }
1815
1816 static int
1817 ftrace_event_avail_open(struct inode *inode, struct file *file)
1818 {
1819         const struct seq_operations *seq_ops = &show_event_seq_ops;
1820
1821         /* Checks for tracefs lockdown */
1822         return ftrace_event_open(inode, file, seq_ops);
1823 }
1824
1825 static int
1826 ftrace_event_set_open(struct inode *inode, struct file *file)
1827 {
1828         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1829         struct trace_array *tr = inode->i_private;
1830         int ret;
1831
1832         ret = tracing_check_open_get_tr(tr);
1833         if (ret)
1834                 return ret;
1835
1836         if ((file->f_mode & FMODE_WRITE) &&
1837             (file->f_flags & O_TRUNC))
1838                 ftrace_clear_events(tr);
1839
1840         ret = ftrace_event_open(inode, file, seq_ops);
1841         if (ret < 0)
1842                 trace_array_put(tr);
1843         return ret;
1844 }
1845
1846 static int
1847 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1848 {
1849         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1850         struct trace_array *tr = inode->i_private;
1851         int ret;
1852
1853         ret = tracing_check_open_get_tr(tr);
1854         if (ret)
1855                 return ret;
1856
1857         if ((file->f_mode & FMODE_WRITE) &&
1858             (file->f_flags & O_TRUNC))
1859                 ftrace_clear_event_pids(tr);
1860
1861         ret = ftrace_event_open(inode, file, seq_ops);
1862         if (ret < 0)
1863                 trace_array_put(tr);
1864         return ret;
1865 }
1866
1867 static struct event_subsystem *
1868 create_new_subsystem(const char *name)
1869 {
1870         struct event_subsystem *system;
1871
1872         /* need to create new entry */
1873         system = kmalloc(sizeof(*system), GFP_KERNEL);
1874         if (!system)
1875                 return NULL;
1876
1877         system->ref_count = 1;
1878
1879         /* Only allocate if dynamic (kprobes and modules) */
1880         system->name = kstrdup_const(name, GFP_KERNEL);
1881         if (!system->name)
1882                 goto out_free;
1883
1884         system->filter = NULL;
1885
1886         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1887         if (!system->filter)
1888                 goto out_free;
1889
1890         list_add(&system->list, &event_subsystems);
1891
1892         return system;
1893
1894  out_free:
1895         kfree_const(system->name);
1896         kfree(system);
1897         return NULL;
1898 }
1899
1900 static struct dentry *
1901 event_subsystem_dir(struct trace_array *tr, const char *name,
1902                     struct trace_event_file *file, struct dentry *parent)
1903 {
1904         struct trace_subsystem_dir *dir;
1905         struct event_subsystem *system;
1906         struct dentry *entry;
1907
1908         /* First see if we already created this dir */
1909         list_for_each_entry(dir, &tr->systems, list) {
1910                 system = dir->subsystem;
1911                 if (strcmp(system->name, name) == 0) {
1912                         dir->nr_events++;
1913                         file->system = dir;
1914                         return dir->entry;
1915                 }
1916         }
1917
1918         /* Now see if the system itself exists. */
1919         list_for_each_entry(system, &event_subsystems, list) {
1920                 if (strcmp(system->name, name) == 0)
1921                         break;
1922         }
1923         /* Reset system variable when not found */
1924         if (&system->list == &event_subsystems)
1925                 system = NULL;
1926
1927         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1928         if (!dir)
1929                 goto out_fail;
1930
1931         if (!system) {
1932                 system = create_new_subsystem(name);
1933                 if (!system)
1934                         goto out_free;
1935         } else
1936                 __get_system(system);
1937
1938         dir->entry = tracefs_create_dir(name, parent);
1939         if (!dir->entry) {
1940                 pr_warn("Failed to create system directory %s\n", name);
1941                 __put_system(system);
1942                 goto out_free;
1943         }
1944
1945         dir->tr = tr;
1946         dir->ref_count = 1;
1947         dir->nr_events = 1;
1948         dir->subsystem = system;
1949         file->system = dir;
1950
1951         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
1952                                     &ftrace_subsystem_filter_fops);
1953         if (!entry) {
1954                 kfree(system->filter);
1955                 system->filter = NULL;
1956                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
1957         }
1958
1959         trace_create_file("enable", 0644, dir->entry, dir,
1960                           &ftrace_system_enable_fops);
1961
1962         list_add(&dir->list, &tr->systems);
1963
1964         return dir->entry;
1965
1966  out_free:
1967         kfree(dir);
1968  out_fail:
1969         /* Only print this message if we failed on memory allocation */
1970         if (!dir || !system)
1971                 pr_warn("No memory to create event subsystem %s\n", name);
1972         return NULL;
1973 }
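
/*
 * Illustrative layout (not part of the original source): for a
 * subsystem named "sched", the function above yields the following
 * entries under the instance's events directory:
 *
 *   events/sched/filter    <- ftrace_subsystem_filter_fops
 *   events/sched/enable    <- ftrace_system_enable_fops
 */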
1974
1975 static int
1976 event_create_dir(struct dentry *parent, struct trace_event_file *file)
1977 {
1978         struct trace_event_call *call = file->event_call;
1979         struct trace_array *tr = file->tr;
1980         struct list_head *head;
1981         struct dentry *d_events;
1982         const char *name;
1983         int ret;
1984
1985         /*
1986          * If the trace point header did not define TRACE_SYSTEM
1987          * then the system would be called "TRACE_SYSTEM".
1988          */
1989         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1990                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1991                 if (!d_events)
1992                         return -ENOMEM;
1993         } else
1994                 d_events = parent;
1995
1996         name = trace_event_name(call);
1997         file->dir = tracefs_create_dir(name, d_events);
1998         if (!file->dir) {
1999                 pr_warn("Could not create tracefs '%s' directory\n", name);
2000                 return -1;
2001         }
2002
2003         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2004                 trace_create_file("enable", 0644, file->dir, file,
2005                                   &ftrace_enable_fops);
2006
2007 #ifdef CONFIG_PERF_EVENTS
2008         if (call->event.type && call->class->reg)
2009                 trace_create_file("id", 0444, file->dir,
2010                                   (void *)(long)call->event.type,
2011                                   &ftrace_event_id_fops);
2012 #endif
2013
2014         /*
2015          * Other events may have the same class. Only update
2016          * the fields if they are not already defined.
2017          */
2018         head = trace_get_fields(call);
2019         if (list_empty(head)) {
2020                 ret = call->class->define_fields(call);
2021                 if (ret < 0) {
2022                         pr_warn("Could not initialize trace point events/%s\n",
2023                                 name);
2024                         return -1;
2025                 }
2026         }
2027
2028         /*
2029          * Only event directories that can be enabled should have
2030          * triggers or filters.
2031          */
2032         if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2033                 trace_create_file("filter", 0644, file->dir, file,
2034                                   &ftrace_event_filter_fops);
2035
2036                 trace_create_file("trigger", 0644, file->dir, file,
2037                                   &event_trigger_fops);
2038         }
2039
2040 #ifdef CONFIG_HIST_TRIGGERS
2041         trace_create_file("hist", 0444, file->dir, file,
2042                           &event_hist_fops);
2043 #endif
2044         trace_create_file("format", 0444, file->dir, call,
2045                           &ftrace_event_format_fops);
2046
2047 #ifdef CONFIG_TRACE_EVENT_INJECT
2048         if (call->event.type && call->class->reg)
2049                 trace_create_file("inject", 0200, file->dir, file,
2050                                   &event_inject_fops);
2051 #endif
2052
2053         return 0;
2054 }
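
/*
 * Illustrative layout (not part of the original source): for a typical
 * event such as sched:sched_switch, event_create_dir() above produces:
 *
 *   events/sched/sched_switch/enable
 *   events/sched/sched_switch/id        (CONFIG_PERF_EVENTS only)
 *   events/sched/sched_switch/filter
 *   events/sched/sched_switch/trigger
 *   events/sched/sched_switch/hist      (CONFIG_HIST_TRIGGERS only)
 *   events/sched/sched_switch/format
 *   events/sched/sched_switch/inject    (CONFIG_TRACE_EVENT_INJECT only)
 */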
2055
2056 static void remove_event_from_tracers(struct trace_event_call *call)
2057 {
2058         struct trace_event_file *file;
2059         struct trace_array *tr;
2060
2061         do_for_each_event_file_safe(tr, file) {
2062                 if (file->event_call != call)
2063                         continue;
2064
2065                 remove_event_file_dir(file);
2066                 /*
2067                  * The do_for_each_event_file_safe() is
2068                  * a double loop. After finding the call for this
2069                  * trace_array, we use break to jump to the next
2070                  * trace_array.
2071                  */
2072                 break;
2073         } while_for_each_event_file();
2074 }
2075
2076 static void event_remove(struct trace_event_call *call)
2077 {
2078         struct trace_array *tr;
2079         struct trace_event_file *file;
2080
2081         do_for_each_event_file(tr, file) {
2082                 if (file->event_call != call)
2083                         continue;
2084
2085                 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2086                         tr->clear_trace = true;
2087
2088                 ftrace_event_enable_disable(file, 0);
2089                 /*
2090                  * The do_for_each_event_file() is
2091                  * a double loop. After finding the call for this
2092                  * trace_array, we use break to jump to the next
2093                  * trace_array.
2094                  */
2095                 break;
2096         } while_for_each_event_file();
2097
2098         if (call->event.funcs)
2099                 __unregister_trace_event(&call->event);
2100         remove_event_from_tracers(call);
2101         list_del(&call->list);
2102 }
2103
2104 static int event_init(struct trace_event_call *call)
2105 {
2106         int ret = 0;
2107         const char *name;
2108
2109         name = trace_event_name(call);
2110         if (WARN_ON(!name))
2111                 return -EINVAL;
2112
2113         if (call->class->raw_init) {
2114                 ret = call->class->raw_init(call);
2115                 if (ret < 0 && ret != -ENOSYS)
2116                         pr_warn("Could not initialize trace events/%s\n", name);
2117         }
2118
2119         return ret;
2120 }
2121
2122 static int
2123 __register_event(struct trace_event_call *call, struct module *mod)
2124 {
2125         int ret;
2126
2127         ret = event_init(call);
2128         if (ret < 0)
2129                 return ret;
2130
2131         list_add(&call->list, &ftrace_events);
2132         call->mod = mod;
2133
2134         return 0;
2135 }
2136
2137 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2138 {
2139         int rlen;
2140         int elen;
2141
2142         /* Find the length of the eval value as a string */
2143         elen = snprintf(ptr, 0, "%ld", map->eval_value);
2144         /* Make sure there's enough room to replace the string with the value */
2145         if (len < elen)
2146                 return NULL;
2147
2148         snprintf(ptr, elen + 1, "%ld", map->eval_value);
2149
2150         /* Get the rest of the string of ptr */
2151         rlen = strlen(ptr + len);
2152         memmove(ptr + elen, ptr + len, rlen);
2153         /* Make sure we end the new string */
2154         ptr[elen + rlen] = 0;
2155
2156         return ptr + elen;
2157 }
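
/*
 * Worked example (illustrative; the enum value is made up): if
 * print_fmt contains the token ZONE_NORMAL from a TRACE_DEFINE_ENUM()
 * mapping with an eval_value of, say, 2, eval_replace() rewrites the
 * buffer in place and returns a pointer just past the "2":
 *
 *   before: __print_symbolic(REC->nid, { ZONE_NORMAL, "Normal" })
 *   after:  __print_symbolic(REC->nid, { 2, "Normal" })
 */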
2158
2159 static void update_event_printk(struct trace_event_call *call,
2160                                 struct trace_eval_map *map)
2161 {
2162         char *ptr;
2163         int quote = 0;
2164         int len = strlen(map->eval_string);
2165
2166         for (ptr = call->print_fmt; *ptr; ptr++) {
2167                 if (*ptr == '\\') {
2168                         ptr++;
2169                         /* paranoid */
2170                         if (!*ptr)
2171                                 break;
2172                         continue;
2173                 }
2174                 if (*ptr == '"') {
2175                         quote ^= 1;
2176                         continue;
2177                 }
2178                 if (quote)
2179                         continue;
2180                 if (isdigit(*ptr)) {
2181                         /* skip numbers */
2182                         do {
2183                                 ptr++;
2184                                 /* Check for alpha chars like ULL */
2185                         } while (isalnum(*ptr));
2186                         if (!*ptr)
2187                                 break;
2188                         /*
2189                          * A number must have some kind of delimiter after
2190                          * it, and we can ignore that too.
2191                          */
2192                         continue;
2193                 }
2194                 if (isalpha(*ptr) || *ptr == '_') {
2195                         if (strncmp(map->eval_string, ptr, len) == 0 &&
2196                             !isalnum(ptr[len]) && ptr[len] != '_') {
2197                                 ptr = eval_replace(ptr, map, len);
2198                                 /* enum/sizeof string smaller than value */
2199                                 if (WARN_ON_ONCE(!ptr))
2200                                         return;
2201                                 /*
2202                                  * No need to decrement here, as eval_replace()
2203                                  * returns the pointer to the character past
2204                                  * the eval, and two evals cannot be placed
2205                                  * back to back without something in between.
2206                                  * We can skip that something in between.
2207                                  */
2208                                 continue;
2209                         }
2210                 skip_more:
2211                         do {
2212                                 ptr++;
2213                         } while (isalnum(*ptr) || *ptr == '_');
2214                         if (!*ptr)
2215                                 break;
2216                         /*
2217                          * If what comes after this variable is a '.' or
2218                          * '->' then we can continue to ignore that string.
2219                          */
2220                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2221                                 ptr += *ptr == '.' ? 1 : 2;
2222                                 if (!*ptr)
2223                                         break;
2224                                 goto skip_more;
2225                         }
2226                         /*
2227                          * Once again, we can skip the delimiter that came
2228                          * after the string.
2229                          */
2230                         continue;
2231                 }
2232         }
2233 }
2234
2235 void trace_event_eval_update(struct trace_eval_map **map, int len)
2236 {
2237         struct trace_event_call *call, *p;
2238         const char *last_system = NULL;
2239         bool first = false;
2240         int last_i;
2241         int i;
2242
2243         down_write(&trace_event_sem);
2244         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2245                 /* events are usually grouped together with systems */
2246                 if (!last_system || call->class->system != last_system) {
2247                         first = true;
2248                         last_i = 0;
2249                         last_system = call->class->system;
2250                 }
2251
2252                 /*
2253                  * Since calls are grouped by systems, the likelihood that the
2254                  * next call in the iteration belongs to the same system as the
2255                  * previous call is high. As an optimization, we skip searching
2256                  * for a map[] that matches the call's system if the last call
2257                  * was from the same system. That's what last_i is for. If the
2258                  * call has the same system as the previous call, then last_i
2259                  * will be the index of the first map[] that has a matching
2260                  * system.
2261                  */
2262                 for (i = last_i; i < len; i++) {
2263                         if (call->class->system == map[i]->system) {
2264                                 /* Save the first system if need be */
2265                                 if (first) {
2266                                         last_i = i;
2267                                         first = false;
2268                                 }
2269                                 update_event_printk(call, map[i]);
2270                         }
2271                 }
2272         }
2273         up_write(&trace_event_sem);
2274 }
2275
2276 static struct trace_event_file *
2277 trace_create_new_event(struct trace_event_call *call,
2278                        struct trace_array *tr)
2279 {
2280         struct trace_event_file *file;
2281
2282         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2283         if (!file)
2284                 return NULL;
2285
2286         file->event_call = call;
2287         file->tr = tr;
2288         atomic_set(&file->sm_ref, 0);
2289         atomic_set(&file->tm_ref, 0);
2290         INIT_LIST_HEAD(&file->triggers);
2291         list_add(&file->list, &tr->events);
2292
2293         return file;
2294 }
2295
2296 /* Add an event to a trace directory */
2297 static int
2298 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2299 {
2300         struct trace_event_file *file;
2301
2302         file = trace_create_new_event(call, tr);
2303         if (!file)
2304                 return -ENOMEM;
2305
2306         return event_create_dir(tr->event_dir, file);
2307 }
2308
2309 /*
2310  * Just create a descriptor for early init. A descriptor is required
2311  * for enabling events at boot. We want to enable events before
2312  * the filesystem is initialized.
2313  */
2314 static __init int
2315 __trace_early_add_new_event(struct trace_event_call *call,
2316                             struct trace_array *tr)
2317 {
2318         struct trace_event_file *file;
2319
2320         file = trace_create_new_event(call, tr);
2321         if (!file)
2322                 return -ENOMEM;
2323
2324         return 0;
2325 }
2326
2327 struct ftrace_module_file_ops;
2328 static void __add_event_to_tracers(struct trace_event_call *call);
2329
2330 /* Add an additional event_call dynamically */
2331 int trace_add_event_call(struct trace_event_call *call)
2332 {
2333         int ret;
2334         lockdep_assert_held(&event_mutex);
2335
2336         mutex_lock(&trace_types_lock);
2337
2338         ret = __register_event(call, NULL);
2339         if (ret >= 0)
2340                 __add_event_to_tracers(call);
2341
2342         mutex_unlock(&trace_types_lock);
2343         return ret;
2344 }
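
/*
 * Illustrative caller sketch (hypothetical names, not from this file):
 * dynamic event sources such as kprobe events register their
 * trace_event_call this way, with event_mutex already held:
 *
 *   mutex_lock(&event_mutex);
 *   ret = trace_add_event_call(&my_dynamic_call);
 *   mutex_unlock(&event_mutex);
 */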
2345
2346 /*
2347  * Must be called with trace_types_lock, event_mutex and
2348  * trace_event_sem held.
2349  */
2350 static void __trace_remove_event_call(struct trace_event_call *call)
2351 {
2352         event_remove(call);
2353         trace_destroy_fields(call);
2354         free_event_filter(call->filter);
2355         call->filter = NULL;
2356 }
2357
2358 static int probe_remove_event_call(struct trace_event_call *call)
2359 {
2360         struct trace_array *tr;
2361         struct trace_event_file *file;
2362
2363 #ifdef CONFIG_PERF_EVENTS
2364         if (call->perf_refcount)
2365                 return -EBUSY;
2366 #endif
2367         do_for_each_event_file(tr, file) {
2368                 if (file->event_call != call)
2369                         continue;
2370                 /*
2371                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2372                  * that we are going to do; EVENT_FILE_FL_SOFT_MODE can suppress
2373                  * TRACE_REG_UNREGISTER.
2374                  */
2375                 if (file->flags & EVENT_FILE_FL_ENABLED)
2376                         return -EBUSY;
2377                 /*
2378                  * The do_for_each_event_file() is
2379                  * a double loop. After finding the call for this
2380                  * trace_array, we use break to jump to the next
2381                  * trace_array.
2382                  */
2383                 break;
2384         } while_for_each_event_file();
2385
2386         __trace_remove_event_call(call);
2387
2388         return 0;
2389 }
2390
2391 /* Remove an event_call */
2392 int trace_remove_event_call(struct trace_event_call *call)
2393 {
2394         int ret;
2395
2396         lockdep_assert_held(&event_mutex);
2397
2398         mutex_lock(&trace_types_lock);
2399         down_write(&trace_event_sem);
2400         ret = probe_remove_event_call(call);
2401         up_write(&trace_event_sem);
2402         mutex_unlock(&trace_types_lock);
2403
2404         return ret;
2405 }
2406
2407 #define for_each_event(event, start, end)                       \
2408         for (event = start;                                     \
2409              (unsigned long)event < (unsigned long)end;         \
2410              event++)
2411
2412 #ifdef CONFIG_MODULES
2413
2414 static void trace_module_add_events(struct module *mod)
2415 {
2416         struct trace_event_call **call, **start, **end;
2417
2418         if (!mod->num_trace_events)
2419                 return;
2420
2421         /* Don't add infrastructure for mods without tracepoints */
2422         if (trace_module_has_bad_taint(mod)) {
2423                 pr_err("%s: module has bad taint, not creating trace events\n",
2424                        mod->name);
2425                 return;
2426         }
2427
2428         start = mod->trace_events;
2429         end = mod->trace_events + mod->num_trace_events;
2430
2431         for_each_event(call, start, end) {
2432                 __register_event(*call, mod);
2433                 __add_event_to_tracers(*call);
2434         }
2435 }
2436
2437 static void trace_module_remove_events(struct module *mod)
2438 {
2439         struct trace_event_call *call, *p;
2440
2441         down_write(&trace_event_sem);
2442         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2443                 if (call->mod == mod)
2444                         __trace_remove_event_call(call);
2445         }
2446         up_write(&trace_event_sem);
2447
2448         /*
2449          * It is safest to reset the ring buffer if the module being unloaded
2450          * registered any events that were used. The only worry is if
2451          * a new module gets loaded, and takes on the same id as the events
2452          * of this module. When printing out the buffer, traced events left
2453          * over from this module may be passed to the new module events and
2454          * unexpected results may occur.
2455          */
2456         tracing_reset_all_online_cpus();
2457 }
2458
2459 static int trace_module_notify(struct notifier_block *self,
2460                                unsigned long val, void *data)
2461 {
2462         struct module *mod = data;
2463
2464         mutex_lock(&event_mutex);
2465         mutex_lock(&trace_types_lock);
2466         switch (val) {
2467         case MODULE_STATE_COMING:
2468                 trace_module_add_events(mod);
2469                 break;
2470         case MODULE_STATE_GOING:
2471                 trace_module_remove_events(mod);
2472                 break;
2473         }
2474         mutex_unlock(&trace_types_lock);
2475         mutex_unlock(&event_mutex);
2476
2477         return 0;
2478 }
2479
2480 static struct notifier_block trace_module_nb = {
2481         .notifier_call = trace_module_notify,
2482         .priority = 1, /* higher than trace.c module notify */
2483 };
2484 #endif /* CONFIG_MODULES */
2485
2486 /* Create a new event directory structure for a trace directory. */
2487 static void
2488 __trace_add_event_dirs(struct trace_array *tr)
2489 {
2490         struct trace_event_call *call;
2491         int ret;
2492
2493         list_for_each_entry(call, &ftrace_events, list) {
2494                 ret = __trace_add_new_event(call, tr);
2495                 if (ret < 0)
2496                         pr_warn("Could not create directory for event %s\n",
2497                                 trace_event_name(call));
2498         }
2499 }
2500
2501 /* Returns any file that matches the system and event */
2502 struct trace_event_file *
2503 __find_event_file(struct trace_array *tr, const char *system, const char *event)
2504 {
2505         struct trace_event_file *file;
2506         struct trace_event_call *call;
2507         const char *name;
2508
2509         list_for_each_entry(file, &tr->events, list) {
2510
2511                 call = file->event_call;
2512                 name = trace_event_name(call);
2513
2514                 if (!name || !call->class)
2515                         continue;
2516
2517                 if (strcmp(event, name) == 0 &&
2518                     strcmp(system, call->class->system) == 0)
2519                         return file;
2520         }
2521         return NULL;
2522 }
2523
2524 /* Returns a valid trace event file that matches the system and event */
2525 struct trace_event_file *
2526 find_event_file(struct trace_array *tr, const char *system, const char *event)
2527 {
2528         struct trace_event_file *file;
2529
2530         file = __find_event_file(tr, system, event);
2531         if (!file || !file->event_call->class->reg ||
2532             file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2533                 return NULL;
2534
2535         return file;
2536 }
2537
2538 #ifdef CONFIG_DYNAMIC_FTRACE
2539
2540 /* Avoid typos */
2541 #define ENABLE_EVENT_STR        "enable_event"
2542 #define DISABLE_EVENT_STR       "disable_event"
2543
2544 struct event_probe_data {
2545         struct trace_event_file *file;
2546         unsigned long                   count;
2547         int                             ref;
2548         bool                            enable;
2549 };
2550
2551 static void update_event_probe(struct event_probe_data *data)
2552 {
2553         if (data->enable)
2554                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2555         else
2556                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2557 }
2558
2559 static void
2560 event_enable_probe(unsigned long ip, unsigned long parent_ip,
2561                    struct trace_array *tr, struct ftrace_probe_ops *ops,
2562                    void *data)
2563 {
2564         struct ftrace_func_mapper *mapper = data;
2565         struct event_probe_data *edata;
2566         void **pdata;
2567
2568         pdata = ftrace_func_mapper_find_ip(mapper, ip);
2569         if (!pdata || !*pdata)
2570                 return;
2571
2572         edata = *pdata;
2573         update_event_probe(edata);
2574 }
2575
2576 static void
2577 event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
2578                          struct trace_array *tr, struct ftrace_probe_ops *ops,
2579                          void *data)
2580 {
2581         struct ftrace_func_mapper *mapper = data;
2582         struct event_probe_data *edata;
2583         void **pdata;
2584
2585         pdata = ftrace_func_mapper_find_ip(mapper, ip);
2586         if (!pdata || !*pdata)
2587                 return;
2588
2589         edata = *pdata;
2590
2591         if (!edata->count)
2592                 return;
2593
2594         /* Skip if the event is in a state we want to switch to */
2595         if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2596                 return;
2597
2598         if (edata->count != -1)
2599                 (edata->count)--;
2600
2601         update_event_probe(edata);
2602 }
2603
2604 static int
2605 event_enable_print(struct seq_file *m, unsigned long ip,
2606                    struct ftrace_probe_ops *ops, void *data)
2607 {
2608         struct ftrace_func_mapper *mapper = data;
2609         struct event_probe_data *edata;
2610         void **pdata;
2611
2612         pdata = ftrace_func_mapper_find_ip(mapper, ip);
2613
2614         if (WARN_ON_ONCE(!pdata || !*pdata))
2615                 return 0;
2616
2617         edata = *pdata;
2618
2619         seq_printf(m, "%ps:", (void *)ip);
2620
2621         seq_printf(m, "%s:%s:%s",
2622                    edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2623                    edata->file->event_call->class->system,
2624                    trace_event_name(edata->file->event_call));
2625
2626         if (edata->count == -1)
2627                 seq_puts(m, ":unlimited\n");
2628         else
2629                 seq_printf(m, ":count=%ld\n", edata->count);
2630
2631         return 0;
2632 }
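
/*
 * Illustrative output (not part of the original source): when such a
 * probe is listed by reading set_ftrace_filter, the function above
 * prints lines along the lines of:
 *
 *   schedule:enable_event:sched:sched_switch:count=5
 *   schedule:enable_event:sched:sched_switch:unlimited
 */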
2633
2634 static int
2635 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
2636                   unsigned long ip, void *init_data, void **data)
2637 {
2638         struct ftrace_func_mapper *mapper = *data;
2639         struct event_probe_data *edata = init_data;
2640         int ret;
2641
2642         if (!mapper) {
2643                 mapper = allocate_ftrace_func_mapper();
2644                 if (!mapper)
2645                         return -ENODEV;
2646                 *data = mapper;
2647         }
2648
2649         ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
2650         if (ret < 0)
2651                 return ret;
2652
2653         edata->ref++;
2654
2655         return 0;
2656 }
2657
2658 static int free_probe_data(void *data)
2659 {
2660         struct event_probe_data *edata = data;
2661
2662         edata->ref--;
2663         if (!edata->ref) {
2664                 /* Remove the SOFT_MODE flag */
2665                 __ftrace_event_enable_disable(edata->file, 0, 1);
2666                 module_put(edata->file->event_call->mod);
2667                 kfree(edata);
2668         }
2669         return 0;
2670 }
2671
2672 static void
2673 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
2674                   unsigned long ip, void *data)
2675 {
2676         struct ftrace_func_mapper *mapper = data;
2677         struct event_probe_data *edata;
2678
2679         if (!ip) {
2680                 if (!mapper)
2681                         return;
2682                 free_ftrace_func_mapper(mapper, free_probe_data);
2683                 return;
2684         }
2685
2686         edata = ftrace_func_mapper_remove_ip(mapper, ip);
2687
2688         if (WARN_ON_ONCE(!edata))
2689                 return;
2690
2691         if (WARN_ON_ONCE(edata->ref <= 0))
2692                 return;
2693
2694         free_probe_data(edata);
2695 }
2696
2697 static struct ftrace_probe_ops event_enable_probe_ops = {
2698         .func                   = event_enable_probe,
2699         .print                  = event_enable_print,
2700         .init                   = event_enable_init,
2701         .free                   = event_enable_free,
2702 };
2703
2704 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2705         .func                   = event_enable_count_probe,
2706         .print                  = event_enable_print,
2707         .init                   = event_enable_init,
2708         .free                   = event_enable_free,
2709 };
2710
2711 static struct ftrace_probe_ops event_disable_probe_ops = {
2712         .func                   = event_enable_probe,
2713         .print                  = event_enable_print,
2714         .init                   = event_enable_init,
2715         .free                   = event_enable_free,
2716 };
2717
2718 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2719         .func                   = event_enable_count_probe,
2720         .print                  = event_enable_print,
2721         .init                   = event_enable_init,
2722         .free                   = event_enable_free,
2723 };
2724
2725 static int
2726 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
2727                   char *glob, char *cmd, char *param, int enabled)
2728 {
2729         struct trace_event_file *file;
2730         struct ftrace_probe_ops *ops;
2731         struct event_probe_data *data;
2732         const char *system;
2733         const char *event;
2734         char *number;
2735         bool enable;
2736         int ret;
2737
2738         if (!tr)
2739                 return -ENODEV;
2740
2741         /* hash funcs only work with set_ftrace_filter */
2742         if (!enabled || !param)
2743                 return -EINVAL;
2744
2745         system = strsep(&param, ":");
2746         if (!param)
2747                 return -EINVAL;
2748
2749         event = strsep(&param, ":");
2750
2751         mutex_lock(&event_mutex);
2752
2753         ret = -EINVAL;
2754         file = find_event_file(tr, system, event);
2755         if (!file)
2756                 goto out;
2757
2758         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2759
2760         if (enable)
2761                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2762         else
2763                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2764
2765         if (glob[0] == '!') {
2766                 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
2767                 goto out;
2768         }
2769
2770         ret = -ENOMEM;
2771
2772         data = kzalloc(sizeof(*data), GFP_KERNEL);
2773         if (!data)
2774                 goto out;
2775
2776         data->enable = enable;
2777         data->count = -1;
2778         data->file = file;
2779
2780         if (!param)
2781                 goto out_reg;
2782
2783         number = strsep(&param, ":");
2784
2785         ret = -EINVAL;
2786         if (!strlen(number))
2787                 goto out_free;
2788
2789         /*
2790          * We use the callback data field (which is a pointer)
2791          * as our counter.
2792          */
2793         ret = kstrtoul(number, 0, &data->count);
2794         if (ret)
2795                 goto out_free;
2796
2797  out_reg:
2798         /* Don't let event modules unload while this probe is registered */
2799         ret = try_module_get(file->event_call->mod);
2800         if (!ret) {
2801                 ret = -EBUSY;
2802                 goto out_free;
2803         }
2804
2805         ret = __ftrace_event_enable_disable(file, 1, 1);
2806         if (ret < 0)
2807                 goto out_put;
2808
2809         ret = register_ftrace_function_probe(glob, tr, ops, data);
2810         /*
2811          * On success, the above returns the number of functions enabled,
2812          * but if it didn't find any functions it returns zero.
2813          * Treat finding no functions as a failure too.
2814          */
2815         if (!ret) {
2816                 ret = -ENOENT;
2817                 goto out_disable;
2818         } else if (ret < 0)
2819                 goto out_disable;
2820         /* Just return zero, not the number of enabled functions */
2821         ret = 0;
2822  out:
2823         mutex_unlock(&event_mutex);
2824         return ret;
2825
2826  out_disable:
2827         __ftrace_event_enable_disable(file, 0, 1);
2828  out_put:
2829         module_put(file->event_call->mod);
2830  out_free:
2831         kfree(data);
2832         goto out;
2833 }
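
/*
 * Illustrative usage (not part of the original source): the command
 * parsed above comes from set_ftrace_filter. For example, soft-enable
 * sched_switch whenever schedule() is hit, at most 5 times, then
 * remove the probe with a leading '!':
 *
 *   # echo 'schedule:enable_event:sched:sched_switch:5' > \
 *         /sys/kernel/tracing/set_ftrace_filter
 *   # echo '!schedule:enable_event:sched:sched_switch' >> \
 *         /sys/kernel/tracing/set_ftrace_filter
 */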
2834
2835 static struct ftrace_func_command event_enable_cmd = {
2836         .name                   = ENABLE_EVENT_STR,
2837         .func                   = event_enable_func,
2838 };
2839
2840 static struct ftrace_func_command event_disable_cmd = {
2841         .name                   = DISABLE_EVENT_STR,
2842         .func                   = event_enable_func,
2843 };
2844
2845 static __init int register_event_cmds(void)
2846 {
2847         int ret;
2848
2849         ret = register_ftrace_command(&event_enable_cmd);
2850         if (WARN_ON(ret < 0))
2851                 return ret;
2852         ret = register_ftrace_command(&event_disable_cmd);
2853         if (WARN_ON(ret < 0))
2854                 unregister_ftrace_command(&event_enable_cmd);
2855         return ret;
2856 }
2857 #else
2858 static inline int register_event_cmds(void) { return 0; }
2859 #endif /* CONFIG_DYNAMIC_FTRACE */
2860
2861 /*
2862  * The top level array has already had its trace_event_file
2863  * descriptors created in order to allow for early events to
2864  * be recorded. This function is called after the tracefs has been
2865  * initialized, and we now have to create the files associated
2866  * to the events.
2867  */
2868 static __init void
2869 __trace_early_add_event_dirs(struct trace_array *tr)
2870 {
2871         struct trace_event_file *file;
2872         int ret;
2873
2874
2875         list_for_each_entry(file, &tr->events, list) {
2876                 ret = event_create_dir(tr->event_dir, file);
2877                 if (ret < 0)
2878                         pr_warn("Could not create directory for event %s\n",
2879                                 trace_event_name(file->event_call));
2880         }
2881 }
2882
2883 /*
2884  * For early boot up, the top trace array needs to have
2885  * a list of events that can be enabled. This must be done before
2886  * the filesystem is set up in order to allow events to be traced
2887  * early.
2888  */
2889 static __init void
2890 __trace_early_add_events(struct trace_array *tr)
2891 {
2892         struct trace_event_call *call;
2893         int ret;
2894
2895         list_for_each_entry(call, &ftrace_events, list) {
2896                 /* Early boot up should not have any modules loaded */
2897                 if (WARN_ON_ONCE(call->mod))
2898                         continue;
2899
2900                 ret = __trace_early_add_new_event(call, tr);
2901                 if (ret < 0)
2902                         pr_warn("Could not create early event %s\n",
2903                                 trace_event_name(call));
2904         }
2905 }
2906
2907 /* Remove the event directory structure for a trace directory. */
2908 static void
2909 __trace_remove_event_dirs(struct trace_array *tr)
2910 {
2911         struct trace_event_file *file, *next;
2912
2913         list_for_each_entry_safe(file, next, &tr->events, list)
2914                 remove_event_file_dir(file);
2915 }
2916
2917 static void __add_event_to_tracers(struct trace_event_call *call)
2918 {
2919         struct trace_array *tr;
2920
2921         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2922                 __trace_add_new_event(call, tr);
2923 }
2924
2925 extern struct trace_event_call *__start_ftrace_events[];
2926 extern struct trace_event_call *__stop_ftrace_events[];
2927
2928 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2929
2930 static __init int setup_trace_event(char *str)
2931 {
2932         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2933         ring_buffer_expanded = true;
2934         tracing_selftest_disabled = true;
2935
2936         return 1;
2937 }
2938 __setup("trace_event=", setup_trace_event);
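
/*
 * Illustrative usage (not part of the original source): events can be
 * enabled from the kernel command line before tracefs exists, e.g.:
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 */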
2939
2940 /* Expects to have event_mutex held when called */
2941 static int
2942 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2943 {
2944         struct dentry *d_events;
2945         struct dentry *entry;
2946
2947         entry = tracefs_create_file("set_event", 0644, parent,
2948                                     tr, &ftrace_set_event_fops);
2949         if (!entry) {
2950                 pr_warn("Could not create tracefs 'set_event' entry\n");
2951                 return -ENOMEM;
2952         }
2953
2954         d_events = tracefs_create_dir("events", parent);
2955         if (!d_events) {
2956                 pr_warn("Could not create tracefs 'events' directory\n");
2957                 return -ENOMEM;
2958         }
2959
2960         entry = trace_create_file("enable", 0644, d_events,
2961                                   tr, &ftrace_tr_enable_fops);
2962         if (!entry) {
2963                 pr_warn("Could not create tracefs 'enable' entry\n");
2964                 return -ENOMEM;
2965         }
2966
2967         /* These are not as crucial; just warn if they are not created */
2968
2969         entry = tracefs_create_file("set_event_pid", 0644, parent,
2970                                     tr, &ftrace_set_event_pid_fops);
2971         if (!entry)
2972                 pr_warn("Could not create tracefs 'set_event_pid' entry\n");
2973
2974         /* ring buffer internal formats */
2975         entry = trace_create_file("header_page", 0444, d_events,
2976                                   ring_buffer_print_page_header,
2977                                   &ftrace_show_header_fops);
2978         if (!entry)
2979                 pr_warn("Could not create tracefs 'header_page' entry\n");
2980
2981         entry = trace_create_file("header_event", 0444, d_events,
2982                                   ring_buffer_print_entry_header,
2983                                   &ftrace_show_header_fops);
2984         if (!entry)
2985                 pr_warn("Could not create tracefs 'header_event' entry\n");
2986
2987         tr->event_dir = d_events;
2988
2989         return 0;
2990 }
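
/*
 * Illustrative layout (not part of the original source): for each
 * trace instance, the function above creates, under the instance's
 * tracefs directory:
 *
 *   set_event
 *   set_event_pid
 *   events/enable
 *   events/header_page
 *   events/header_event
 */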
2991
2992 /**
2993  * event_trace_add_tracer - add an instance of a trace_array to events
2994  * @parent: The parent dentry to place the files/directories for events in
2995  * @tr: The trace array associated with these events
2996  *
2997  * When a new instance is created, it needs to set up its events
2998  * directory, as well as other files associated with events. It also
2999  * creates the event hierarchy in the @parent/events directory.
3000  *
3001  * Returns 0 on success.
3002  *
3003  * Must be called with event_mutex held.
3004  */
3005 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3006 {
3007         int ret;
3008
3009         lockdep_assert_held(&event_mutex);
3010
3011         ret = create_event_toplevel_files(parent, tr);
3012         if (ret)
3013                 goto out;
3014
3015         down_write(&trace_event_sem);
3016         __trace_add_event_dirs(tr);
3017         up_write(&trace_event_sem);
3018
3019  out:
3020         return ret;
3021 }
3022
3023 /*
3024  * The top trace array already had its file descriptors created.
3025  * Now the files themselves need to be created.
3026  */
3027 static __init int
3028 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3029 {
3030         int ret;
3031
3032         mutex_lock(&event_mutex);
3033
3034         ret = create_event_toplevel_files(parent, tr);
3035         if (ret)
3036                 goto out_unlock;
3037
3038         down_write(&trace_event_sem);
3039         __trace_early_add_event_dirs(tr);
3040         up_write(&trace_event_sem);
3041
3042  out_unlock:
3043         mutex_unlock(&event_mutex);
3044
3045         return ret;
3046 }
3047
3048 /* Must be called with event_mutex held */
3049 int event_trace_del_tracer(struct trace_array *tr)
3050 {
3051         lockdep_assert_held(&event_mutex);
3052
3053         /* Disable any event triggers and associated soft-disabled events */
3054         clear_event_triggers(tr);
3055
3056         /* Clear the pid list */
3057         __ftrace_clear_event_pids(tr);
3058
3059         /* Disable any running events */
3060         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3061
3062         /* Make sure no more events are being executed */
3063         tracepoint_synchronize_unregister();
3064
3065         down_write(&trace_event_sem);
3066         __trace_remove_event_dirs(tr);
3067         tracefs_remove_recursive(tr->event_dir);
3068         up_write(&trace_event_sem);
3069
3070         tr->event_dir = NULL;
3071
3072         return 0;
3073 }
3074
3075 static __init int event_trace_memsetup(void)
3076 {
3077         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3078         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3079         return 0;
3080 }
3081
3082 static __init void
3083 early_enable_events(struct trace_array *tr, bool disable_first)
3084 {
3085         char *buf = bootup_event_buf;
3086         char *token;
3087         int ret;
3088
3089         while (true) {
3090                 token = strsep(&buf, ",");
3091
3092                 if (!token)
3093                         break;
3094
3095                 if (*token) {
3096                         /* Restarting syscalls requires that we stop them first */
3097                         if (disable_first)
3098                                 ftrace_set_clr_event(tr, token, 0);
3099
3100                         ret = ftrace_set_clr_event(tr, token, 1);
3101                         if (ret)
3102                                 pr_warn("Failed to enable trace event: %s\n", token);
3103                 }
3104
3105                 /* Put back the comma to allow this to be called again */
3106                 if (buf)
3107                         *(buf - 1) = ',';
3108         }
3109 }
3110
3111 static __init int event_trace_enable(void)
3112 {
3113         struct trace_array *tr = top_trace_array();
3114         struct trace_event_call **iter, *call;
3115         int ret;
3116
3117         if (!tr)
3118                 return -ENODEV;
3119
3120         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3121
3122                 call = *iter;
3123                 ret = event_init(call);
3124                 if (!ret)
3125                         list_add(&call->list, &ftrace_events);
3126         }
3127
3128         /*
3129          * We need the top trace array to have a working set of trace
3130          * points at early init, before the debug files and directories
3131          * are created. Create the file entries now, and attach them
3132          * to the actual file dentries later.
3133          */
3134         __trace_early_add_events(tr);
3135
3136         early_enable_events(tr, false);
3137
3138         trace_printk_start_comm();
3139
3140         register_event_cmds();
3141
3142         register_trigger_cmds();
3143
3144         return 0;
3145 }
3146
3147 /*
3148  * event_trace_enable() is called from trace_event_init() first to
3149  * initialize events and perhaps start any events that are on the
3150  * command line. Unfortunately, there are some events that will not
3151  * start this early, like the system call tracepoints that need
3152  * the TIF_SYSCALL_TRACEPOINT flag set on pid 1. But event_trace_enable()
3153  * is called before pid 1 starts, so that flag is never set and the
3154  * syscall tracepoints are never reached, even though the events are
3155  * enabled (and thus do nothing).
3156  */
3157 static __init int event_trace_enable_again(void)
3158 {
3159         struct trace_array *tr;
3160
3161         tr = top_trace_array();
3162         if (!tr)
3163                 return -ENODEV;
3164
3165         early_enable_events(tr, true);
3166
3167         return 0;
3168 }
3169
3170 early_initcall(event_trace_enable_again);
3171
3172 __init int event_trace_init(void)
3173 {
3174         struct trace_array *tr;
3175         struct dentry *d_tracer;
3176         struct dentry *entry;
3177         int ret;
3178
3179         tr = top_trace_array();
3180         if (!tr)
3181                 return -ENODEV;
3182
3183         d_tracer = tracing_init_dentry();
3184         if (IS_ERR(d_tracer))
3185                 return 0;
3186
3187         entry = tracefs_create_file("available_events", 0444, d_tracer,
3188                                     tr, &ftrace_avail_fops);
3189         if (!entry)
3190                 pr_warn("Could not create tracefs 'available_events' entry\n");
3191
3192         if (trace_define_generic_fields())
3193                 pr_warn("tracing: Failed to allocate generic fields\n");
3194
3195         if (trace_define_common_fields())
3196                 pr_warn("tracing: Failed to allocate common fields\n");
3197
3198         ret = early_event_add_tracer(d_tracer, tr);
3199         if (ret)
3200                 return ret;
3201
3202 #ifdef CONFIG_MODULES
3203         ret = register_module_notifier(&trace_module_nb);
3204         if (ret)
3205                 pr_warn("Failed to register trace events module notifier\n");
3206 #endif
3207         return 0;
3208 }
3209
3210 void __init trace_event_init(void)
3211 {
3212         event_trace_memsetup();
3213         init_ftrace_syscalls();
3214         event_trace_enable();
3215 }
3216
3217 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
3218
3219 static DEFINE_SPINLOCK(test_spinlock);
3220 static DEFINE_SPINLOCK(test_spinlock_irq);
3221 static DEFINE_MUTEX(test_mutex);
3222
3223 static __init void test_work(struct work_struct *dummy)
3224 {
3225         spin_lock(&test_spinlock);
3226         spin_lock_irq(&test_spinlock_irq);
3227         udelay(1);
3228         spin_unlock_irq(&test_spinlock_irq);
3229         spin_unlock(&test_spinlock);
3230
3231         mutex_lock(&test_mutex);
3232         msleep(1);
3233         mutex_unlock(&test_mutex);
3234 }
3235
3236 static __init int event_test_thread(void *unused)
3237 {
3238         void *test_malloc;
3239
3240         test_malloc = kmalloc(1234, GFP_KERNEL);
3241         if (!test_malloc)
3242                 pr_info("failed to kmalloc\n");
3243
3244         schedule_on_each_cpu(test_work);
3245
3246         kfree(test_malloc);
3247
3248         set_current_state(TASK_INTERRUPTIBLE);
3249         while (!kthread_should_stop()) {
3250                 schedule();
3251                 set_current_state(TASK_INTERRUPTIBLE);
3252         }
3253         __set_current_state(TASK_RUNNING);
3254
3255         return 0;
3256 }
3257
3258 /*
3259  * Do various things that may trigger events.
3260  */
3261 static __init void event_test_stuff(void)
3262 {
3263         struct task_struct *test_thread;
3264
3265         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3266         msleep(1);
3267         kthread_stop(test_thread);
3268 }
3269
3270 /*
3271  * For every trace event defined, we will test each trace point
3272  * separately, then in groups by subsystem, and finally all together.
3273  */
3274 static __init void event_trace_self_tests(void)
3275 {
3276         struct trace_subsystem_dir *dir;
3277         struct trace_event_file *file;
3278         struct trace_event_call *call;
3279         struct event_subsystem *system;
3280         struct trace_array *tr;
3281         int ret;
3282
3283         tr = top_trace_array();
3284         if (!tr)
3285                 return;
3286
3287         pr_info("Running tests on trace events:\n");
3288
3289         list_for_each_entry(file, &tr->events, list) {
3290
3291                 call = file->event_call;
3292
3293                 /* Only test those that have a probe */
3294                 if (!call->class || !call->class->probe)
3295                         continue;
3296
3297 /*
3298  * Testing syscall events here is pretty useless, but we still
3299  * do it if configured. It is time consuming, though; what we
3300  * really need is a user thread performing the syscalls as we
3301  * test.
3302  */
3303 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3304                 if (call->class->system &&
3305                     strcmp(call->class->system, "syscalls") == 0)
3306                         continue;
3307 #endif
3308
3309                 pr_info("Testing event %s: ", trace_event_name(call));
3310
3311                 /*
3312                  * If an event is already enabled, someone is using
3313                  * it and the self test should not be running.
3314                  */
3315                 if (file->flags & EVENT_FILE_FL_ENABLED) {
3316                         pr_warn("Enabled event during self test!\n");
3317                         WARN_ON_ONCE(1);
3318                         continue;
3319                 }
3320
3321                 ftrace_event_enable_disable(file, 1);
3322                 event_test_stuff();
3323                 ftrace_event_enable_disable(file, 0);
3324
3325                 pr_cont("OK\n");
3326         }
3327
3328         /* Now test at the subsystem level */
3329
3330         pr_info("Running tests on trace event systems:\n");
3331
3332         list_for_each_entry(dir, &tr->systems, list) {
3333
3334                 system = dir->subsystem;
3335
3336                 /* the ftrace system is special, skip it */
3337                 if (strcmp(system->name, "ftrace") == 0)
3338                         continue;
3339
3340                 pr_info("Testing event system %s: ", system->name);
3341
3342                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3343                 if (WARN_ON_ONCE(ret)) {
3344                         pr_warn("error enabling system %s\n",
3345                                 system->name);
3346                         continue;
3347                 }
3348
3349                 event_test_stuff();
3350
3351                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3352                 if (WARN_ON_ONCE(ret)) {
3353                         pr_warn("error disabling system %s\n",
3354                                 system->name);
3355                         continue;
3356                 }
3357
3358                 pr_cont("OK\n");
3359         }
3360
3361         /* Test with all events enabled */
3362
3363         pr_info("Running tests on all trace events:\n");
3364         pr_info("Testing all events: ");
3365
3366         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3367         if (WARN_ON_ONCE(ret)) {
3368                 pr_warn("error enabling all events\n");
3369                 return;
3370         }
3371
3372         event_test_stuff();
3373
3374         /* Disable all events again */
3375         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3376         if (WARN_ON_ONCE(ret)) {
3377                 pr_warn("error disabling all events\n");
3378                 return;
3379         }
3380
3381         pr_cont("OK\n");
3382 }
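/*
 * The three passes above mirror what user space can do through the
 * per-event, per-system, and global "enable" files in tracefs
 * (paths assume the usual mount point), e.g.:
 *
 *   # echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
 *   # echo 1 > /sys/kernel/tracing/events/sched/enable
 *   # echo 1 > /sys/kernel/tracing/events/enable
 */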
3383
3384 #ifdef CONFIG_FUNCTION_TRACER
3385
3386 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3387
3388 static struct trace_event_file event_trace_file __initdata;
3389
3390 static void __init
3391 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3392                           struct ftrace_ops *op, struct pt_regs *pt_regs)
3393 {
3394         struct ring_buffer_event *event;
3395         struct ring_buffer *buffer;
3396         struct ftrace_entry *entry;
3397         unsigned long flags;
3398         long disabled;
3399         int cpu;
3400         int pc;
3401
3402         pc = preempt_count();
3403         preempt_disable_notrace();
3404         cpu = raw_smp_processor_id();
3405         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3406
3407         if (disabled != 1) /* callback recursed on this CPU; bail out */
3408                 goto out;
3409
3410         local_save_flags(flags);
3411
3412         event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3413                                                 TRACE_FN, sizeof(*entry),
3414                                                 flags, pc);
3415         if (!event)
3416                 goto out;
3417         entry   = ring_buffer_event_data(event);
3418         entry->ip                       = ip;
3419         entry->parent_ip                = parent_ip;
3420
3421         event_trigger_unlock_commit(&event_trace_file, buffer, event,
3422                                     entry, flags, pc);
3423  out:
3424         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3425         preempt_enable_notrace();
3426 }
3427
3428 static struct ftrace_ops trace_ops __initdata =
3429 {
3430         .func = function_test_events_call,
3431         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3432 };
3433
3434 static __init void event_trace_self_test_with_function(void)
3435 {
3436         int ret;
3437
3438         event_trace_file.tr = top_trace_array();
3439         if (WARN_ON(!event_trace_file.tr))
3440                 return;
3441
3442         ret = register_ftrace_function(&trace_ops);
3443         if (WARN_ON(ret < 0)) {
3444                 pr_info("Failed to enable function tracer for event tests\n");
3445                 return;
3446         }
3447         pr_info("Running tests again, along with the function tracer\n");
3448         event_trace_self_tests();
3449         unregister_ftrace_function(&trace_ops);
3450 }
3451 #else
3452 static __init void event_trace_self_test_with_function(void)
3453 {
3454 }
3455 #endif
3456
3457 static __init int event_trace_self_tests_init(void)
3458 {
3459         if (!tracing_selftest_disabled) {
3460                 event_trace_self_tests();
3461                 event_trace_self_test_with_function();
3462         }
3463
3464         return 0;
3465 }
3466
3467 late_initcall(event_trace_self_tests_init);
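/*
 * With CONFIG_EVENT_TRACE_STARTUP_TEST=y the pr_info()/pr_cont()
 * output above lands in the kernel log and can be checked with,
 * e.g., "dmesg | grep 'Testing event'".
 */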
3468
3469 #endif