/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
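/*
 * Illustrative usage (mirroring trace_event_enable_cmd_record() below):
 *
 *	do_for_each_event_file(tr, file) {
 *		...
 *	} while_for_each_event_file();
 */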
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, cpu, FILTER_OTHER);
	__generic_field(char *, comm, FILTER_PTR_STRING);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
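		/*
		 * Example (from this file): event_enable_func() calls
		 * __ftrace_event_enable_disable(file, 1, 1) to take a soft
		 * reference for the enable_event function probe, and
		 * (file, 0, 1) to drop it again.
		 */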
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static int cmp_pid(const void *key, const void *elt)
{
	const pid_t *search_pid = key;
	const pid_t *pid = elt;

	if (*search_pid == *pid)
		return 0;
	if (*search_pid < *pid)
		return -1;

	return 1;
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	free_pages((unsigned long)pid_list->pids, pid_list->order);
	kfree(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
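	/*
	 * For example (illustrative): "sched:sched_switch" names one
	 * event, "sched:" or "sched:*" names every event in the sched
	 * subsystem, and a bare "sched_switch" matches by event name
	 * or by subsystem name.
	 */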
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
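/*
 * Example (illustrative): enable every event in the "sched" subsystem
 * from kernel code.
 *
 *	trace_set_clr_event("sched", NULL, 1);
 */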
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}

	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}

	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *p_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list || *pos >= pid_list->nr_pids)
		return NULL;

	return (void *)&pid_list->pids[*pos];
}

static void p_stop(struct seq_file *m, void *p)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	(*pos)++;

	if (*pos >= pid_list->nr_pids)
		return NULL;

	return (void *)&pid_list->pids[*pos];
}

static int p_show(struct seq_file *m, void *v)
{
	pid_t *pid = v;

	seq_printf(m, "%d\n", *pid);
	return 0;
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
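	/*
	 * For example (illustrative), a "char comm[16]" field is shown
	 * as "field:char comm[16];" rather than "field:char[16] comm;".
	 */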
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (*ppos)
		return 0;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static int max_pids(struct trace_pid_list *pid_list)
{
	/* The pid array is allocated in whole pages */
	return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
}
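/*
 * e.g. with 4K pages and a 4-byte pid_t (illustrative, both are
 * architecture dependent), order 0 holds 1024 pids, order 1 holds
 * 2048, and so on.
 */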
static ssize_t
ftrace_event_pid_write(struct file *file, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list = NULL;
	struct trace_parser parser;
	unsigned long val;
	loff_t this_pos;
	ssize_t read = 0;
	ssize_t ret = 0;
	pid_t pid;
	int i;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	mutex_lock(&event_mutex);
	/*
	 * Load as many pids into the array before doing a
	 * swap from the tr->filtered_pids to the new list.
	 */
	while (cnt > 0) {

		this_pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val > INT_MAX)
			break;

		pid = (pid_t)val;

		ret = -ENOMEM;
		if (!pid_list) {
			pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
			if (!pid_list)
				break;

			filtered_pids = rcu_dereference_protected(tr->filtered_pids,
							lockdep_is_held(&event_mutex));
			if (filtered_pids)
				pid_list->order = filtered_pids->order;
			else
				pid_list->order = 0;

			pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
								  pid_list->order);
			if (!pid_list->pids)
				break;

			if (filtered_pids) {
				pid_list->nr_pids = filtered_pids->nr_pids;
				memcpy(pid_list->pids, filtered_pids->pids,
				       pid_list->nr_pids * sizeof(pid_t));
			} else
				pid_list->nr_pids = 0;
		}

		if (pid_list->nr_pids >= max_pids(pid_list)) {
			pid_t *pid_page;

			pid_page = (void *)__get_free_pages(GFP_KERNEL,
							    pid_list->order + 1);
			if (!pid_page)
				break;
			memcpy(pid_page, pid_list->pids,
			       pid_list->nr_pids * sizeof(pid_t));
			free_pages((unsigned long)pid_list->pids, pid_list->order);

			pid_list->order++;
			pid_list->pids = pid_page;
		}

		pid_list->pids[pid_list->nr_pids++] = pid;
		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		if (pid_list)
			free_pages((unsigned long)pid_list->pids, pid_list->order);
		kfree(pid_list);
		mutex_unlock(&event_mutex);
		return ret;
	}

	if (!pid_list) {
		mutex_unlock(&event_mutex);
		return ret;
	}

	sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);

	/* Remove duplicates */
	for (i = 1; i < pid_list->nr_pids; i++) {
		int start = i;

		while (i < pid_list->nr_pids &&
		       pid_list->pids[i - 1] == pid_list->pids[i])
			i++;

		/* If a dup was found, move the rest of the array down */
		if (start != i) {
			if (i < pid_list->nr_pids) {
				memmove(&pid_list->pids[start], &pid_list->pids[i],
					(pid_list->nr_pids - i) * sizeof(pid_t));
				pid_list->nr_pids -= i - start;
				i = start;
			} else
				pid_list->nr_pids = start;
		}
	}

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	mutex_unlock(&event_mutex);

	if (filtered_pids) {
		synchronize_sched();

		free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
		kfree(filtered_pids);
	}

	ret = read;
	*ppos += read;

	return ret;
}
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = p_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int ftrace_event_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}

static int
ftrace_event_set_pid_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_event_pids(tr);

	ret = ftrace_event_open(inode, file, seq_ops);
	if (ret < 0)
		trace_array_put(tr);
	return ret;
}
static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;

	/* Only allocate if dynamic (kprobes and modules) */
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree_const(system->name);
	kfree(system);
	return NULL;
}

static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}

static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	trace_create_file("trigger", 0644, file->dir, file,
			  &event_trigger_fops);

	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}
static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}

static int event_init(struct trace_event_call *call)
{
	int ret = 0;
	const char *name;

	name = trace_event_name(call);
	if (WARN_ON(!name))
		return -EINVAL;

	if (call->class->raw_init) {
		ret = call->class->raw_init(call);
		if (ret < 0 && ret != -ENOSYS)
			pr_warn("Could not initialize trace events/%s\n", name);
	}

	return ret;
}

static int
__register_event(struct trace_event_call *call, struct module *mod)
{
	int ret;

	ret = event_init(call);
	if (ret < 0)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;

	return 0;
}

static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the enum value as a string */
	elen = snprintf(ptr, 0, "%ld", map->enum_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->enum_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}
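/*
 * Illustrative example: with a map entry of "ZONE_NORMAL" = 2, a
 * print_fmt containing "zone ZONE_NORMAL" is rewritten in place to
 * "zone 2", and the returned pointer references the character just
 * past the inserted "2".
 */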
static void update_event_printk(struct trace_event_call *call,
				struct trace_enum_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->enum_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			ptr++;
			/* paranoid: \ at the end of string? */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			if (strncmp(map->enum_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = enum_replace(ptr, map, len);
				/* Hmm, enum string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as enum_replace()
				 * returns the pointer to the character past
				 * the enum, and two enums can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}

void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			last_i = 0;
			last_system = call->class->system;
		}

		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (!last_i)
					last_i = i;
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}
static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}

/* Add an event to a trace directory */
static int
__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return event_create_dir(tr->event_dir, file);
}

/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct trace_event_call *call,
			    struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}

struct ftrace_module_file_ops;
static void __add_event_to_tracers(struct trace_event_call *call);

/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}

/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;
}

static int probe_remove_event_call(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on ftrace_event_enable_disable(enable => 0)
		 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}

/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static void trace_module_add_events(struct module *mod)
{
	struct trace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct trace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1, /* higher than trace.c module notify */
};
#endif /* CONFIG_MODULES */

/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(call));
	}
}

struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (strcmp(event, name) == 0 &&
		    strcmp(system, call->class->system) == 0)
			return file;
	}
	return NULL;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
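/*
 * Illustrative usage of these commands via set_ftrace_filter:
 *
 *	echo 'schedule:enable_event:sched:sched_switch:1' > \
 *		set_ftrace_filter
 *
 * enables the sched:sched_switch event the first time schedule()
 * is hit.
 */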
struct event_probe_data {
	struct trace_event_file	*file;
	unsigned long			count;
	int				ref;
	bool				enable;
};

static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}

static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}

static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   trace_event_name(data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}

static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}

static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}

static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the tracefs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	entry = tracefs_create_file("set_event_pid", 0644, parent,
				    tr, &ftrace_set_event_pid_fops);

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
2776 * event_trace_add_tracer - add a instance of a trace_array to events
2777 * @parent: The parent dentry to place the files/directories for events in
2778 * @tr: The trace array associated with these events
2780 * When a new instance is created, it needs to set up its events
2781 * directory, as well as other files associated with events. It also
2782 * creates the event hierachry in the @parent/events directory.
2784 * Returns 0 on success.
2786 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2790 mutex_lock(&event_mutex);
2792 ret = create_event_toplevel_files(parent, tr);
2796 down_write(&trace_event_sem);
2797 __trace_add_event_dirs(tr);
2798 up_write(&trace_event_sem);
2801 mutex_unlock(&event_mutex);
2807 * The top trace array already had its file descriptors created.
2808 * Now the files themselves need to be created.
2811 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2815 mutex_lock(&event_mutex);
2817 ret = create_event_toplevel_files(parent, tr);
2821 down_write(&trace_event_sem);
2822 __trace_early_add_event_dirs(tr);
2823 up_write(&trace_event_sem);
2826 mutex_unlock(&event_mutex);
2831 int event_trace_del_tracer(struct trace_array *tr)
2833 mutex_lock(&event_mutex);
2835 /* Disable any event triggers and associated soft-disabled events */
2836 clear_event_triggers(tr);
2838 /* Clear the pid list */
2839 __ftrace_clear_event_pids(tr);
2841 /* Disable any running events */
2842 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2844 /* Access to events are within rcu_read_lock_sched() */
2845 synchronize_sched();
2847 down_write(&trace_event_sem);
2848 __trace_remove_event_dirs(tr);
2849 tracefs_remove_recursive(tr->event_dir);
2850 up_write(&trace_event_sem);
2852 tr->event_dir = NULL;
2854 mutex_unlock(&event_mutex);
2859 static __init int event_trace_memsetup(void)
2861 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2862 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
static __init void early_enable_events(struct trace_array *tr, bool disable_first)
{
	char *buf = bootup_event_buf;
	char *token;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (*token) {
			/* Restarting syscalls requires that we stop them first */
			if (disable_first)
				ftrace_set_clr_event(tr, token, 0);
			if (ftrace_set_clr_event(tr, token, 1))
				pr_warn("Failed to enable trace event: %s\n", token);
		}
		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
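/*
 * Illustrative note (not from the original source): strsep() writes a
 * '\0' over each ',' as it walks bootup_event_buf, so putting the comma
 * back after each token keeps the buffer intact:
 *
 *	"sched:sched_switch,irq" -> "sched:sched_switch\0irq" -> restored
 *
 * This lets the function run twice: once from event_trace_enable() and
 * again from event_trace_enable_again().
 */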
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);
	early_enable_events(tr, false);
	trace_printk_start_comm();
	register_event_cmds();
	register_trigger_cmds();

	return 0;
}
/*
 * event_trace_enable() is called from trace_event_init() first, to
 * initialize events and perhaps start any events that are on the
 * command line. Unfortunately, some events will not start this early,
 * such as the system call tracepoints, which need the
 * TIF_SYSCALL_TRACEPOINT flag set on pid 1. But event_trace_enable()
 * is called before pid 1 starts, so the flag is never set, the
 * syscall tracepoints are never reached, and the events are enabled
 * but do nothing. Calling early_enable_events() again from an early
 * initcall restarts those events.
 */
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, true);

	return 0;
}
early_initcall(event_trace_enable_again);
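/*
 * Illustrative note (assumption based on common kernel behavior, not
 * stated in this file): initcalls, including the early_initcall above,
 * execute in the kernel_init thread, which is pid 1. By then, enabling
 * a syscall event can set TIF_SYSCALL_TRACEPOINT on the init task,
 * which is why rerunning early_enable_events() with disable_first set
 * to true works here.
 */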
static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'available_events' entry\n");

	if (trace_define_generic_fields())
		pr_warn("tracing: Failed to allocate generic fields");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif
	return 0;
}
void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);
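/*
 * Initialization order (summary added for clarity; the exact call site
 * of trace_event_init() in the core tracing setup is an assumption):
 *
 *	trace_event_init()            - called from core tracing setup,
 *	                                before any initcalls run
 *	event_trace_enable_again()    - early_initcall, restarts events
 *	                                (e.g. syscalls) that could not
 *	                                start that early
 *	event_trace_init()            - fs_initcall, creates tracefs files
 *	event_trace_self_tests_init() - late_initcall, startup tests only
 */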
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");
	schedule_on_each_cpu(test_work);
	kfree(test_malloc);
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;
	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but we still do it
 * if configured. It is time consuming, though; what we really need
 * is a user thread to perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}
	/* Now test at the sub system level */
	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n", system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n", system->name);
			continue;
		}

		pr_cont("OK\n");
	}
	/* Test with all events enabled */
	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
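/*
 * Abridged, illustrative console output of a successful self-test run
 * (assembled from the pr_info()/pr_cont() calls above; actual event
 * names depend on the kernel configuration):
 *
 *	Running tests on trace events:
 *	Testing event sched_switch: OK
 *	...
 *	Running tests on trace event systems:
 *	Testing event system sched: OK
 *	...
 *	Testing all events: OK
 */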
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_array *event_tr;
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
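/*
 * Note (added for clarity): FTRACE_OPS_FL_RECURSION_SAFE tells ftrace
 * that this callback provides its own recursion protection, which
 * function_test_events_call() does with the per-CPU
 * ftrace_test_event_disable counter above.
 */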
static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_tr = top_trace_array();
	if (WARN_ON(!event_tr))
		return;
	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}
	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */