// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set up via the
 * kernel command line) is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console in debug scenarios.
 *
 * It is default off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs.
 * Set it to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
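#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/*
 * Illustrative sketch of walking the saved eval maps with the layout
 * described above. The function name is hypothetical and the function
 * is not part of the tracing code; it assumes @ptr points at the
 * leading head item of an array whose head.length counts the map
 * entries that follow it.
 */
static inline void example_eval_map_walk(union trace_eval_map_item *ptr)
{
	while (ptr) {
		unsigned long len = ptr->head.length;
		unsigned long i;

		/* map entries start right after the head item */
		for (i = 1; i <= len; i++)
			pr_debug("%s: %s -> %lu\n", ptr[i].map.system,
				 ptr[i].map.eval_string,
				 ptr[i].map.eval_value);

		/* the tail item links to the next saved array (or NULL) */
		ptr = ptr[len + 1].tail.next;
	}
}
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */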
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}
int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
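/*
 * Illustrative sketch of a minimal consumer of the export API above.
 * The example_* names are hypothetical and not part of the tracing
 * code; it assumes struct trace_export from include/linux/trace.h
 * with its .write callback and .flags members.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward the raw binary event, e.g. to a device or the network */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT,
};

static inline int example_export_init(void)
{
	/* start receiving every trace event written to the ring buffer */
	return register_ftrace_export(&example_export);
}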
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}
/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the
 * iteration of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
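/*
 * Illustrative sketch of how the three helpers above typically plug
 * into a seq_file. The example_* names are hypothetical and not part
 * of the tracing code; real users (such as the set_event_pid file)
 * also take the appropriate locks in their start/stop callbacks. It
 * assumes m->private was set up to point at a trace_pid_list.
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return pid_list ? trace_pid_start(pid_list, pos) : NULL;
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(m->private, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};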
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
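/*
 * Illustrative sketch of the intended reader-side pattern for the
 * primitives above. The function name is hypothetical and not part of
 * the tracing code; the real callers are the trace file read and
 * splice paths.
 */
static inline void example_consume_cpu_events(struct trace_iterator *iter,
					      int cpu)
{
	trace_access_lock(cpu);	/* excludes RING_BUFFER_ALL_CPUS readers */

	/* ... consume events from the @cpu ring buffer via @iter ... */

	trace_access_unlock(cpu);
}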
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * the race where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
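/*
 * Illustrative use: __trace_puts() is normally reached through the
 * trace_puts() macro, which passes _THIS_IP_ and a compile-time string
 * length so that only one branch and one copy happen on the fast path.
 * The probe function name below is hypothetical and not part of the
 * tracing code.
 */
static inline void example_fastpath_probe(void)
{
	trace_puts("example: hit the fast path\n");
}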
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}
void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
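/*
 * Illustrative sketch of the documented usage pattern: allocate the
 * spare buffer up front from sleepable context, then take snapshots
 * from the (possibly atomic) trigger point. The example_* names and
 * the trigger condition are hypothetical and not part of the tracing
 * code.
 */
static inline int example_snapshot_setup(void)
{
	/* only allocates; does not take a snapshot */
	return tracing_alloc_snapshot();
}

static inline void example_snapshot_on_condition(bool hit)
{
	if (hit)
		tracing_snapshot();	/* swap the live buffer into the snapshot */
}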
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
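/*
 * Illustrative sketch of a minimal conditional-snapshot user. The
 * example_* names and the "new maximum" condition are hypothetical
 * and not part of the tracing code; the update callback follows the
 * cond_update_fn_t signature and is called with tr->max_lock held.
 */
static bool example_snapshot_update(struct trace_array *tr, void *cond_data)
{
	u64 *max_seen = cond_data;
	u64 now = local_clock();	/* stand-in for a measured value */

	/* take the snapshot only when a new maximum is observed */
	if (now > *max_seen) {
		*max_seen = now;
		return true;
	}
	return false;
}

static inline int example_snapshot_cond_setup(struct trace_array *tr,
					      u64 *max_seen)
{
	return tracing_snapshot_cond_enable(tr, max_seen,
					    example_snapshot_update);
}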
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * the race where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}
/**
 * tracer_tracing_is_on - show the real state of the ring buffer
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows the real state of the ring buffer: enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
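/*
 * Illustrative sketch of the classic debugging pattern these exports
 * enable: a driver can freeze the ring buffer the moment an anomaly is
 * detected, so the events leading up to it survive for post-mortem
 * reading. The function name and the detection condition are
 * hypothetical and not part of the tracing code.
 */
static inline void example_capture_anomaly(bool anomaly_detected)
{
	if (anomaly_detected && tracing_is_on())
		tracing_off();	/* stop recording; buffers keep their contents */
}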
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}
1564 int trace_parser_get_init(struct trace_parser *parser, int size)
1566 memset(parser, 0, sizeof(*parser));
1568 parser->buffer = kmalloc(size, GFP_KERNEL);
1569 if (!parser->buffer)
1572 parser->size = size;
1577 * trace_parser_put - frees the buffer for trace parser
1579 void trace_parser_put(struct trace_parser *parser)
1581 kfree(parser->buffer);
1582 parser->buffer = NULL;
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
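/*
 * Illustrative sketch of how a tracefs write() handler typically
 * drives trace_get_user(). The handler name and the 128-byte size are
 * hypothetical and not part of the tracing code; compare the real
 * loop in trace_pid_write() earlier in this file.
 */
static inline ssize_t example_parse_words(const char __user *ubuf, size_t cnt)
{
	struct trace_parser parser;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		/* parser.buffer now holds one NUL-terminated word */
		trace_parser_clear(&parser);
	}
	trace_parser_put(&parser);

	return read;
}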
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
unsigned long __read_mostly	tracing_thresh;
static const struct file_operations tracing_max_lat_fops;

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
	defined(CONFIG_FSNOTIFY)
static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
					      d_tracer, &tr->max_latency,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);
void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

/*
 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
 *  defined(CONFIG_FSNOTIFY)
 */
#else

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", 0644, d_tracer,	\
			  &tr->max_latency, &tracing_max_lat_fops)

#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	if (!tracing_is_on()) {
		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
			type->name);
		return 0;
	}

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	disable_tracing_selftest("running a tracer");

 out_unlock:
	return ret;
}
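/*
 * Illustrative sketch of the minimum a tracer plugin provides before
 * calling register_tracer() from an __init path. The example_* names
 * are hypothetical and not part of the tracing code; real tracers
 * also supply start/stop/read/selftest hooks as needed.
 */
static int example_tracer_init(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}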
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	/* NO_CMDLINE_MAP is UINT_MAX, so every byte of the maps is 0xff */
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
2399 static int trace_save_cmdline(struct task_struct *tsk)
2403 /* treat recording of idle task as a success */
2407 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2410 * It's not the end of the world if we don't get
2411 * the lock, but we also don't want to spin
2412 * nor do we want to disable interrupts,
2413 * so if we miss here, then better luck next time.
2415 if (!arch_spin_trylock(&trace_cmdline_lock))
2418 idx = savedcmd->map_pid_to_cmdline[tpid];
2419 if (idx == NO_CMDLINE_MAP) {
2420 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2422 savedcmd->map_pid_to_cmdline[tpid] = idx;
2423 savedcmd->cmdline_idx = idx;
2426 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2427 set_cmdline(idx, tsk->comm);
2429 arch_spin_unlock(&trace_cmdline_lock);
2434 static void __trace_find_cmdline(int pid, char comm[])
2440 strcpy(comm, "<idle>");
2444 if (WARN_ON_ONCE(pid < 0)) {
2445 strcpy(comm, "<XXX>");
2449 tpid = pid & (PID_MAX_DEFAULT - 1);
2450 map = savedcmd->map_pid_to_cmdline[tpid];
2451 if (map != NO_CMDLINE_MAP) {
2452 tpid = savedcmd->map_cmdline_to_pid[map];
2454 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2458 strcpy(comm, "<...>");
2461 void trace_find_cmdline(int pid, char comm[])
2464 arch_spin_lock(&trace_cmdline_lock);
2466 __trace_find_cmdline(pid, comm);
2468 arch_spin_unlock(&trace_cmdline_lock);
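/*
 * Illustrative sketch: output code typically resolves a recorded PID back
 * to a comm like this ("pid" and the trace_seq "s" are hypothetical inputs):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, pid);
 */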
2472 int trace_find_tgid(int pid)
2474 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2477 return tgid_map[pid];
2480 static int trace_save_tgid(struct task_struct *tsk)
2482 /* treat recording of idle task as a success */
2486 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2489 tgid_map[tsk->pid] = tsk->tgid;
2493 static bool tracing_record_taskinfo_skip(int flags)
2495 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2497 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2499 if (!__this_cpu_read(trace_taskinfo_save))
2505 * tracing_record_taskinfo - record the task info of a task
2507 * @task: task to record
2508 * @flags: TRACE_RECORD_CMDLINE for recording comm
2509 * TRACE_RECORD_TGID for recording tgid
2511 void tracing_record_taskinfo(struct task_struct *task, int flags)
2515 if (tracing_record_taskinfo_skip(flags))
2519 * Record as much task information as possible. If some fail, continue
2520 * to try to record the others.
2522 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2523 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2525 /* If recording any information failed, retry again soon. */
2529 __this_cpu_write(trace_taskinfo_save, false);
2533 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2535 * @prev: previous task during sched_switch
2536 * @next: next task during sched_switch
2537 * @flags: TRACE_RECORD_CMDLINE for recording comm
2538 * TRACE_RECORD_TGID for recording tgid
2540 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2541 struct task_struct *next, int flags)
2545 if (tracing_record_taskinfo_skip(flags))
2549 * Record as much task information as possible. If some fail, continue
2550 * to try to record the others.
2552 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2553 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2554 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2555 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2557 /* If recording any information failed, retry again soon. */
2561 __this_cpu_write(trace_taskinfo_save, false);
2564 /* Helpers to record a specific task information */
2565 void tracing_record_cmdline(struct task_struct *task)
2567 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2570 void tracing_record_tgid(struct task_struct *task)
2572 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
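/*
 * Illustrative sketch: the sched_switch probe records both tasks in a
 * single call rather than using the per-task helpers above:
 *
 *	tracing_record_taskinfo_sched_switch(prev, next,
 *			TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
 */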
2576 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2577 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2578 * simplifies those functions and keeps them in sync.
2580 enum print_line_t trace_handle_return(struct trace_seq *s)
2582 return trace_seq_has_overflowed(s) ?
2583 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2585 EXPORT_SYMBOL_GPL(trace_handle_return);
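/*
 * Illustrative sketch of the pattern this helper supports in an event's
 * ->trace() callback (the "field" dereference is hypothetical):
 *
 *	trace_seq_printf(s, "foo: %d\n", field->foo);
 *	return trace_handle_return(s);
 */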
2587 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2589 unsigned int trace_flags = irqs_status;
2592 pc = preempt_count();
2595 trace_flags |= TRACE_FLAG_NMI;
2596 if (pc & HARDIRQ_MASK)
2597 trace_flags |= TRACE_FLAG_HARDIRQ;
2598 if (in_serving_softirq())
2599 trace_flags |= TRACE_FLAG_SOFTIRQ;
2601 if (tif_need_resched())
2602 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2603 if (test_preempt_need_resched())
2604 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2605 return (trace_flags << 16) | (pc & 0xff);
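/*
 * Illustrative sketch of unpacking the value packed above; this mirrors
 * how tracing_generic_entry_update() splits it back into the entry's
 * flags and preempt_count fields:
 *
 *	entry->flags = trace_ctx >> 16;
 *	entry->preempt_count = trace_ctx & 0xff;
 */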
2608 struct ring_buffer_event *
2609 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2612 unsigned int trace_ctx)
2614 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2617 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2618 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2619 static int trace_buffered_event_ref;
2622 * trace_buffered_event_enable - enable buffering events
2624 * When events are being filtered, it is quicker to use a temporary
2625 * buffer to write the event data into if there's a likely chance
2626 * that it will not be committed. The discard of the ring buffer
2627 * is not as fast as committing, and is much slower than copying a commit.
2630 * When an event is to be filtered, allocate per cpu buffers to
2631 * write the event data into, and if the event is filtered and discarded
2632 * it is simply dropped; otherwise, the entire data is committed in one shot.
2635 void trace_buffered_event_enable(void)
2637 struct ring_buffer_event *event;
2641 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2643 if (trace_buffered_event_ref++)
2646 for_each_tracing_cpu(cpu) {
2647 page = alloc_pages_node(cpu_to_node(cpu),
2648 GFP_KERNEL | __GFP_NORETRY, 0);
2652 event = page_address(page);
2653 memset(event, 0, sizeof(*event));
2655 per_cpu(trace_buffered_event, cpu) = event;
2658 if (cpu == smp_processor_id() &&
2659 __this_cpu_read(trace_buffered_event) !=
2660 per_cpu(trace_buffered_event, cpu))
2667 trace_buffered_event_disable();
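/*
 * Illustrative sketch: the event filter code pairs these refcounted
 * calls, each made with event_mutex held:
 *
 *	trace_buffered_event_enable();     when a filter is attached
 *	...
 *	trace_buffered_event_disable();    when the filter is removed
 */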
2670 static void enable_trace_buffered_event(void *data)
2672 /* Probably not needed, but do it anyway */
2674 this_cpu_dec(trace_buffered_event_cnt);
2677 static void disable_trace_buffered_event(void *data)
2679 this_cpu_inc(trace_buffered_event_cnt);
2683 * trace_buffered_event_disable - disable buffering events
2685 * When a filter is removed, it is faster to not use the buffered
2686 * events, and to commit directly into the ring buffer. Free up
2687 * the temp buffers when there are no more users. This requires
2688 * special synchronization with current events.
2690 void trace_buffered_event_disable(void)
2694 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2696 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2699 if (--trace_buffered_event_ref)
2703 /* For each CPU, set the buffer as used. */
2704 smp_call_function_many(tracing_buffer_mask,
2705 disable_trace_buffered_event, NULL, 1);
2708 /* Wait for all current users to finish */
2711 for_each_tracing_cpu(cpu) {
2712 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2713 per_cpu(trace_buffered_event, cpu) = NULL;
2716 * Make sure trace_buffered_event is NULL before clearing
2717 * trace_buffered_event_cnt.
2722 /* Do the work on each cpu */
2723 smp_call_function_many(tracing_buffer_mask,
2724 enable_trace_buffered_event, NULL, 1);
2728 static struct trace_buffer *temp_buffer;
2730 struct ring_buffer_event *
2731 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2732 struct trace_event_file *trace_file,
2733 int type, unsigned long len,
2734 unsigned int trace_ctx)
2736 struct ring_buffer_event *entry;
2737 struct trace_array *tr = trace_file->tr;
2740 *current_rb = tr->array_buffer.buffer;
2742 if (!tr->no_filter_buffering_ref &&
2743 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2744 (entry = this_cpu_read(trace_buffered_event))) {
2746 * Filtering is on, so try to use the per cpu buffer first.
2747 * This buffer will simulate a ring_buffer_event,
2748 * where the type_len is zero and the array[0] will
2749 * hold the full length.
2750 * (see include/linux/ring_buffer.h for details on
2751 * how the ring_buffer_event is structured).
2753 * Using a temp buffer during filtering and copying it
2754 * on a matched filter is quicker than writing directly
2755 * into the ring buffer and then discarding it when
2756 * it doesn't match. That is because the discard
2757 * requires several atomic operations to get right.
2758 * Copying on match and doing nothing on a failed match
2759 * is still quicker than no copy on match, but having
2760 * to discard out of the ring buffer on a failed match.
2762 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2764 val = this_cpu_inc_return(trace_buffered_event_cnt);
2767 * Preemption is disabled, but interrupts and NMIs
2768 * can still come in now. If that happens after
2769 * the above increment, then it will have to go
2770 * back to the old method of allocating the event
2771 * on the ring buffer, and if the filter fails, it
2772 * will have to call ring_buffer_discard_commit()
2775 * Need to also check the unlikely case that the
2776 * length is bigger than the temp buffer size.
2777 * If that happens, then the reserve is pretty much
2778 * guaranteed to fail, as the ring buffer currently
2779 * only allows events less than a page. But that may
2780 * change in the future, so let the ring buffer reserve
2781 * handle the failure in that case.
2783 if (val == 1 && likely(len <= max_len)) {
2784 trace_event_setup(entry, type, trace_ctx);
2785 entry->array[0] = len;
2788 this_cpu_dec(trace_buffered_event_cnt);
2791 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2794 * If tracing is off but we have triggers enabled,
2795 * we still need to look at the event data. Use the temp_buffer
2796 * to store the trace event for the trigger to use. It's recursion
2797 * safe and will not be recorded anywhere.
2799 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2800 *current_rb = temp_buffer;
2801 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2806 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
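/*
 * Illustrative sketch of the reserve/fill/commit pattern served above
 * (the entry type and field are hypothetical):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	... then commit, e.g. via trace_event_buffer_commit() ...
 */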
2808 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2809 static DEFINE_MUTEX(tracepoint_printk_mutex);
2811 static void output_printk(struct trace_event_buffer *fbuffer)
2813 struct trace_event_call *event_call;
2814 struct trace_event_file *file;
2815 struct trace_event *event;
2816 unsigned long flags;
2817 struct trace_iterator *iter = tracepoint_print_iter;
2819 /* We should never get here if iter is NULL */
2820 if (WARN_ON_ONCE(!iter))
2823 event_call = fbuffer->trace_file->event_call;
2824 if (!event_call || !event_call->event.funcs ||
2825 !event_call->event.funcs->trace)
2828 file = fbuffer->trace_file;
2829 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2830 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2831 !filter_match_preds(file->filter, fbuffer->entry)))
2834 event = &fbuffer->trace_file->event_call->event;
2836 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2837 trace_seq_init(&iter->seq);
2838 iter->ent = fbuffer->entry;
2839 event_call->event.funcs->trace(iter, 0, event);
2840 trace_seq_putc(&iter->seq, 0);
2841 printk("%s", iter->seq.buffer);
2843 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2846 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2847 void *buffer, size_t *lenp,
2850 int save_tracepoint_printk;
2853 mutex_lock(&tracepoint_printk_mutex);
2854 save_tracepoint_printk = tracepoint_printk;
2856 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2859 * This will force exiting early, as tracepoint_printk
2860 * is always zero when tracepoint_print_iter is not allocated.
2862 if (!tracepoint_print_iter)
2863 tracepoint_printk = 0;
2865 if (save_tracepoint_printk == tracepoint_printk)
2868 if (tracepoint_printk)
2869 static_key_enable(&tracepoint_printk_key.key);
2871 static_key_disable(&tracepoint_printk_key.key);
2874 mutex_unlock(&tracepoint_printk_mutex);
2879 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2881 if (static_key_false(&tracepoint_printk_key.key))
2882 output_printk(fbuffer);
2884 if (static_branch_unlikely(&trace_event_exports_enabled))
2885 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2886 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2887 fbuffer->event, fbuffer->entry,
2888 fbuffer->trace_ctx, fbuffer->regs);
2890 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2895 * trace_buffer_unlock_commit_regs()
2896 * trace_event_buffer_commit()
2897 * trace_event_raw_event_xxx()
2899 # define STACK_SKIP 3
2901 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2902 struct trace_buffer *buffer,
2903 struct ring_buffer_event *event,
2904 unsigned int trace_ctx,
2905 struct pt_regs *regs)
2907 __buffer_unlock_commit(buffer, event);
2910 * If regs is not set, then skip the necessary functions.
2911 * Note, we can still get here via blktrace, wakeup tracer
2912 * and mmiotrace, but that's ok if they lose a function or
2913 * two. They are not that meaningful.
2915 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2916 ftrace_trace_userstack(tr, buffer, trace_ctx);
2920 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2923 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2924 struct ring_buffer_event *event)
2926 __buffer_unlock_commit(buffer, event);
2930 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2931 parent_ip, unsigned int trace_ctx)
2933 struct trace_event_call *call = &event_function;
2934 struct trace_buffer *buffer = tr->array_buffer.buffer;
2935 struct ring_buffer_event *event;
2936 struct ftrace_entry *entry;
2938 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2942 entry = ring_buffer_event_data(event);
2944 entry->parent_ip = parent_ip;
2946 if (!call_filter_check_discard(call, entry, buffer, event)) {
2947 if (static_branch_unlikely(&trace_function_exports_enabled))
2948 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2949 __buffer_unlock_commit(buffer, event);
2953 #ifdef CONFIG_STACKTRACE
2955 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2956 #define FTRACE_KSTACK_NESTING 4
2958 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2960 struct ftrace_stack {
2961 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2965 struct ftrace_stacks {
2966 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2969 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2970 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2972 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2973 unsigned int trace_ctx,
2974 int skip, struct pt_regs *regs)
2976 struct trace_event_call *call = &event_kernel_stack;
2977 struct ring_buffer_event *event;
2978 unsigned int size, nr_entries;
2979 struct ftrace_stack *fstack;
2980 struct stack_entry *entry;
2984 * Add one, for this function and the call to stack_trace_save().
2985 * If regs is set, then these functions will not be in the way.
2987 #ifndef CONFIG_UNWINDER_ORC
2992 preempt_disable_notrace();
2994 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2996 /* This should never happen. If it does, yell once and skip */
2997 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3001 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3002 * interrupt will either see the value pre increment or post
3003 * increment. If the interrupt happens pre increment it will have
3004 * restored the counter when it returns. We just need a barrier to
3005 * keep gcc from moving things around.
3009 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3010 size = ARRAY_SIZE(fstack->calls);
3013 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3016 nr_entries = stack_trace_save(fstack->calls, size, skip);
3019 size = nr_entries * sizeof(unsigned long);
3020 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3021 (sizeof(*entry) - sizeof(entry->caller)) + size,
3025 entry = ring_buffer_event_data(event);
3027 memcpy(&entry->caller, fstack->calls, size);
3028 entry->size = nr_entries;
3030 if (!call_filter_check_discard(call, entry, buffer, event))
3031 __buffer_unlock_commit(buffer, event);
3034 /* Again, don't let gcc optimize things here */
3036 __this_cpu_dec(ftrace_stack_reserve);
3037 preempt_enable_notrace();
3041 static inline void ftrace_trace_stack(struct trace_array *tr,
3042 struct trace_buffer *buffer,
3043 unsigned int trace_ctx,
3044 int skip, struct pt_regs *regs)
3046 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3049 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3052 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3055 struct trace_buffer *buffer = tr->array_buffer.buffer;
3057 if (rcu_is_watching()) {
3058 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3063 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3064 * but if the above rcu_is_watching() failed, then the NMI
3065 * triggered someplace critical, and rcu_irq_enter() should
3066 * not be called from NMI.
3068 if (unlikely(in_nmi()))
3071 rcu_irq_enter_irqson();
3072 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3073 rcu_irq_exit_irqson();
3077 * trace_dump_stack - record a stack back trace in the trace buffer
3078 * @skip: Number of functions to skip (helper handlers)
3080 void trace_dump_stack(int skip)
3082 if (tracing_disabled || tracing_selftest_running)
3085 #ifndef CONFIG_UNWINDER_ORC
3086 /* Skip 1 to skip this function. */
3089 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3090 tracing_gen_ctx(), skip, NULL);
3092 EXPORT_SYMBOL_GPL(trace_dump_stack);
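/*
 * Illustrative sketch: drop this into code under investigation to record
 * the current call chain in the trace buffer without stopping the system:
 *
 *	trace_dump_stack(0);
 */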
3094 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3095 static DEFINE_PER_CPU(int, user_stack_count);
3098 ftrace_trace_userstack(struct trace_array *tr,
3099 struct trace_buffer *buffer, unsigned int trace_ctx)
3101 struct trace_event_call *call = &event_user_stack;
3102 struct ring_buffer_event *event;
3103 struct userstack_entry *entry;
3105 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3109 * NMIs cannot handle page faults, even with fixups.
3110 * The save user stack can (and often does) fault.
3112 if (unlikely(in_nmi()))
3116 * prevent recursion, since the user stack tracing may
3117 * trigger other kernel events.
3120 if (__this_cpu_read(user_stack_count))
3123 __this_cpu_inc(user_stack_count);
3125 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3126 sizeof(*entry), trace_ctx);
3128 goto out_drop_count;
3129 entry = ring_buffer_event_data(event);
3131 entry->tgid = current->tgid;
3132 memset(&entry->caller, 0, sizeof(entry->caller));
3134 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3135 if (!call_filter_check_discard(call, entry, buffer, event))
3136 __buffer_unlock_commit(buffer, event);
3139 __this_cpu_dec(user_stack_count);
3143 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3144 static void ftrace_trace_userstack(struct trace_array *tr,
3145 struct trace_buffer *buffer,
3146 unsigned int trace_ctx)
3149 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3151 #endif /* CONFIG_STACKTRACE */
3154 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3155 unsigned long long delta)
3157 entry->bottom_delta_ts = delta & U32_MAX;
3158 entry->top_delta_ts = (delta >> 32);
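/*
 * Worked example of the split above (illustrative): a delta of
 * 0x100000002 ns stores bottom_delta_ts = 0x2 and top_delta_ts = 0x1;
 * readers reassemble it as ((u64)top_delta_ts << 32) | bottom_delta_ts.
 */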
3161 void trace_last_func_repeats(struct trace_array *tr,
3162 struct trace_func_repeats *last_info,
3163 unsigned int trace_ctx)
3165 struct trace_buffer *buffer = tr->array_buffer.buffer;
3166 struct func_repeats_entry *entry;
3167 struct ring_buffer_event *event;
3170 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3171 sizeof(*entry), trace_ctx);
3175 delta = ring_buffer_event_time_stamp(buffer, event) -
3176 last_info->ts_last_call;
3178 entry = ring_buffer_event_data(event);
3179 entry->ip = last_info->ip;
3180 entry->parent_ip = last_info->parent_ip;
3181 entry->count = last_info->count;
3182 func_repeats_set_delta_ts(entry, delta);
3184 __buffer_unlock_commit(buffer, event);
3187 /* created for use with alloc_percpu */
3188 struct trace_buffer_struct {
3190 char buffer[4][TRACE_BUF_SIZE];
3193 static struct trace_buffer_struct *trace_percpu_buffer;
3196 * This allows for lockless recording. If we're nested too deeply, then
3197 * this returns NULL.
3199 static char *get_trace_buf(void)
3201 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3203 if (!buffer || buffer->nesting >= 4)
3208 /* Interrupts must see nesting incremented before we use the buffer */
3210 return &buffer->buffer[buffer->nesting - 1][0];
3213 static void put_trace_buf(void)
3215 /* Don't let the decrement of nesting leak before this */
3217 this_cpu_dec(trace_percpu_buffer->nesting);
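/*
 * Illustrative sketch of how the pair above is used by the vprintk paths
 * below:
 *
 *	char *tbuffer = get_trace_buf();
 *
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *	...
 *	put_trace_buf();
 */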
3220 static int alloc_percpu_trace_buffer(void)
3222 struct trace_buffer_struct *buffers;
3224 if (trace_percpu_buffer)
3227 buffers = alloc_percpu(struct trace_buffer_struct);
3228 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3231 trace_percpu_buffer = buffers;
3235 static int buffers_allocated;
3237 void trace_printk_init_buffers(void)
3239 if (buffers_allocated)
3242 if (alloc_percpu_trace_buffer())
3245 /* trace_printk() is for debug use only. Don't use it in production. */
3248 pr_warn("**********************************************************\n");
3249 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3251 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3253 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3254 pr_warn("** unsafe for production use. **\n");
3256 pr_warn("** If you see this message and you are not debugging **\n");
3257 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3259 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3260 pr_warn("**********************************************************\n");
3262 /* Expand the buffers to the set size */
3263 tracing_update_buffers();
3265 buffers_allocated = 1;
3268 * trace_printk_init_buffers() can be called by modules.
3269 * If that happens, then we need to start cmdline recording
3270 * directly here. If the global_trace.array_buffer.buffer is already
3271 * allocated here, then this was called by module code.
3273 if (global_trace.array_buffer.buffer)
3274 tracing_start_cmdline_record();
3276 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3278 void trace_printk_start_comm(void)
3280 /* Start tracing comms if trace printk is set */
3281 if (!buffers_allocated)
3283 tracing_start_cmdline_record();
3286 static void trace_printk_start_stop_comm(int enabled)
3288 if (!buffers_allocated)
3292 tracing_start_cmdline_record();
3294 tracing_stop_cmdline_record();
3298 * trace_vbprintk - write binary msg to tracing buffer
3299 * @ip: The address of the caller
3300 * @fmt: The string format to write to the buffer
3301 * @args: Arguments for @fmt
3303 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3305 struct trace_event_call *call = &event_bprint;
3306 struct ring_buffer_event *event;
3307 struct trace_buffer *buffer;
3308 struct trace_array *tr = &global_trace;
3309 struct bprint_entry *entry;
3310 unsigned int trace_ctx;
3314 if (unlikely(tracing_selftest_running || tracing_disabled))
3317 /* Don't pollute graph traces with trace_vprintk internals */
3318 pause_graph_tracing();
3320 trace_ctx = tracing_gen_ctx();
3321 preempt_disable_notrace();
3323 tbuffer = get_trace_buf();
3329 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3331 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3334 size = sizeof(*entry) + sizeof(u32) * len;
3335 buffer = tr->array_buffer.buffer;
3336 ring_buffer_nest_start(buffer);
3337 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3341 entry = ring_buffer_event_data(event);
3345 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3346 if (!call_filter_check_discard(call, entry, buffer, event)) {
3347 __buffer_unlock_commit(buffer, event);
3348 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3352 ring_buffer_nest_end(buffer);
3357 preempt_enable_notrace();
3358 unpause_graph_tracing();
3362 EXPORT_SYMBOL_GPL(trace_vbprintk);
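/*
 * Illustrative note: this is the backend of trace_printk() when the
 * format has arguments, e.g.:
 *
 *	trace_printk("read %d bytes from %s\n", len, name);
 *
 * Only the binary arguments and a pointer to the format string are
 * stored; the text is formatted later, when the buffer is read.
 */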
3366 __trace_array_vprintk(struct trace_buffer *buffer,
3367 unsigned long ip, const char *fmt, va_list args)
3369 struct trace_event_call *call = &event_print;
3370 struct ring_buffer_event *event;
3372 struct print_entry *entry;
3373 unsigned int trace_ctx;
3376 if (tracing_disabled || tracing_selftest_running)
3379 /* Don't pollute graph traces with trace_vprintk internals */
3380 pause_graph_tracing();
3382 trace_ctx = tracing_gen_ctx();
3383 preempt_disable_notrace();
3386 tbuffer = get_trace_buf();
3392 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3394 size = sizeof(*entry) + len + 1;
3395 ring_buffer_nest_start(buffer);
3396 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3400 entry = ring_buffer_event_data(event);
3403 memcpy(&entry->buf, tbuffer, len + 1);
3404 if (!call_filter_check_discard(call, entry, buffer, event)) {
3405 __buffer_unlock_commit(buffer, event);
3406 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3410 ring_buffer_nest_end(buffer);
3414 preempt_enable_notrace();
3415 unpause_graph_tracing();
3421 int trace_array_vprintk(struct trace_array *tr,
3422 unsigned long ip, const char *fmt, va_list args)
3424 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3428 * trace_array_printk - Print a message to a specific instance
3429 * @tr: The instance trace_array descriptor
3430 * @ip: The instruction pointer that this is called from.
3431 * @fmt: The format to print (printf format)
3433 * If a subsystem sets up its own instance, they have the right to
3434 * printk strings into their tracing instance buffer using this
3435 * function. Note, this function will not write into the top level
3436 * buffer (use trace_printk() for that), as writing into the top level
3437 * buffer should only have events that can be individually disabled.
3438 * trace_printk() is only used for debugging a kernel, and should never
3439 * be incorporated into normal use.
3441 * trace_array_printk() can be used, as it will not add noise to the
3442 * top level tracing buffer.
3444 * Note, trace_array_init_printk() must be called on @tr before this
3448 int trace_array_printk(struct trace_array *tr,
3449 unsigned long ip, const char *fmt, ...)
3457 /* This is only allowed for created instances */
3458 if (tr == &global_trace)
3461 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3465 ret = trace_array_vprintk(tr, ip, fmt, ap);
3469 EXPORT_SYMBOL_GPL(trace_array_printk);
3472 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3473 * @tr: The trace array to initialize the buffers for
3475 * As trace_array_printk() only writes into instances, they are OK to
3476 * have in the kernel (unlike trace_printk()). This needs to be called
3477 * before trace_array_printk() can be used on a trace_array.
3479 int trace_array_init_printk(struct trace_array *tr)
3484 /* This is only allowed for created instances */
3485 if (tr == &global_trace)
3488 return alloc_percpu_trace_buffer();
3490 EXPORT_SYMBOL_GPL(trace_array_init_printk);
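/*
 * Illustrative sketch (the "my_inst" instance name is hypothetical):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_inst");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 */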
3493 int trace_array_printk_buf(struct trace_buffer *buffer,
3494 unsigned long ip, const char *fmt, ...)
3499 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3503 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3509 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3511 return trace_array_vprintk(&global_trace, ip, fmt, args);
3513 EXPORT_SYMBOL_GPL(trace_vprintk);
3515 static void trace_iterator_increment(struct trace_iterator *iter)
3517 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3521 ring_buffer_iter_advance(buf_iter);
3524 static struct trace_entry *
3525 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3526 unsigned long *lost_events)
3528 struct ring_buffer_event *event;
3529 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3532 event = ring_buffer_iter_peek(buf_iter, ts);
3534 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3535 (unsigned long)-1 : 0;
3537 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3542 iter->ent_size = ring_buffer_event_length(event);
3543 return ring_buffer_event_data(event);
3549 static struct trace_entry *
3550 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3551 unsigned long *missing_events, u64 *ent_ts)
3553 struct trace_buffer *buffer = iter->array_buffer->buffer;
3554 struct trace_entry *ent, *next = NULL;
3555 unsigned long lost_events = 0, next_lost = 0;
3556 int cpu_file = iter->cpu_file;
3557 u64 next_ts = 0, ts;
3563 * If we are in a per_cpu trace file, don't bother iterating over
3564 * all CPUs; peek directly at that one.
3566 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3567 if (ring_buffer_empty_cpu(buffer, cpu_file))
3569 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3571 *ent_cpu = cpu_file;
3576 for_each_tracing_cpu(cpu) {
3578 if (ring_buffer_empty_cpu(buffer, cpu))
3581 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3584 * Pick the entry with the smallest timestamp:
3586 if (ent && (!next || ts < next_ts)) {
3590 next_lost = lost_events;
3591 next_size = iter->ent_size;
3595 iter->ent_size = next_size;
3598 *ent_cpu = next_cpu;
3604 *missing_events = next_lost;
3609 #define STATIC_FMT_BUF_SIZE 128
3610 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3612 static char *trace_iter_expand_format(struct trace_iterator *iter)
3617 * iter->tr is NULL when used with tp_printk, which makes
3618 * this get called where it is not safe to call krealloc().
3620 if (!iter->tr || iter->fmt == static_fmt_buf)
3623 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3626 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3633 /* Returns true if the string is safe to dereference from an event */
3634 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3636 unsigned long addr = (unsigned long)str;
3637 struct trace_event *trace_event;
3638 struct trace_event_call *event;
3640 /* OK if part of the event data */
3641 if ((addr >= (unsigned long)iter->ent) &&
3642 (addr < (unsigned long)iter->ent + iter->ent_size))
3645 /* OK if part of the temp seq buffer */
3646 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3647 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3650 /* Core rodata can not be freed */
3651 if (is_kernel_rodata(addr))
3654 if (trace_is_tracepoint_string(str))
3658 * Now this could be a module event, referencing core module
3659 * data, which is OK.
3664 trace_event = ftrace_find_event(iter->ent->type);
3668 event = container_of(trace_event, struct trace_event_call, event);
3672 /* Would rather have rodata, but this will suffice */
3673 if (within_module_core(addr, event->mod))
3679 static const char *show_buffer(struct trace_seq *s)
3681 struct seq_buf *seq = &s->seq;
3683 seq_buf_terminate(seq);
3688 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3690 static int test_can_verify_check(const char *fmt, ...)
3697 * The verifier depends on vsnprintf() modifying the va_list
3698 * passed to it, where it is sent by reference. Some architectures
3699 * (like x86_32) pass it by value, which means that vsnprintf()
3700 * does not modify the va_list passed to it, and the verifier
3701 * would then need to be able to understand all the values that
3702 * vsnprintf can use. If it is passed by value, the verifier is disabled.
3706 vsnprintf(buf, 16, "%d", ap);
3707 ret = va_arg(ap, int);
3713 static void test_can_verify(void)
3715 if (!test_can_verify_check("%d %d", 0, 1)) {
3716 pr_info("trace event string verifier disabled\n");
3717 static_branch_inc(&trace_no_verify);
3722 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3723 * @iter: The iterator that holds the seq buffer and the event being printed
3724 * @fmt: The format used to print the event
3725 * @ap: The va_list holding the data to print from @fmt.
3727 * This writes the data into the @iter->seq buffer using the data from
3728 * @fmt and @ap. If the format has a %s, then the source of the string
3729 * is examined to make sure it is safe to print, otherwise it will
3730 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3733 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3736 const char *p = fmt;
3740 if (WARN_ON_ONCE(!fmt))
3743 if (static_branch_unlikely(&trace_no_verify))
3746 /* Don't bother checking when doing a ftrace_dump() */
3747 if (iter->fmt == static_fmt_buf)
3756 /* We only care about %s and variants */
3757 for (i = 0; p[i]; i++) {
3758 if (i + 1 >= iter->fmt_size) {
3760 * If we can't expand the copy buffer, just print it.
3763 if (!trace_iter_expand_format(iter))
3767 if (p[i] == '\\' && p[i+1]) {
3772 /* Need to test cases like %08.*s */
3773 for (j = 1; p[i+j]; j++) {
3774 if (isdigit(p[i+j]) ||
3777 if (p[i+j] == '*') {
3789 /* If no %s found then just print normally */
3793 /* Copy up to the %s, and print that */
3794 strncpy(iter->fmt, p, i);
3795 iter->fmt[i] = '\0';
3796 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3799 len = va_arg(ap, int);
3801 /* The ap now points to the string data of the %s */
3802 str = va_arg(ap, const char *);
3805 * If you hit this warning, it is likely that the
3806 * trace event in question used %s on a string that
3807 * was saved at the time of the event, but may not be
3808 * around when the trace is read. Use __string(),
3809 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3810 * macro instead. See samples/trace_events/trace-events-sample.h
3813 if (WARN_ONCE(!trace_safe_str(iter, str),
3814 "fmt: '%s' current_buffer: '%s'",
3815 fmt, show_buffer(&iter->seq))) {
3818 /* Try to safely read the string */
3820 if (len + 1 > iter->fmt_size)
3821 len = iter->fmt_size - 1;
3824 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3828 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3832 trace_seq_printf(&iter->seq, "(0x%px)", str);
3834 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3836 str = "[UNSAFE-MEMORY]";
3837 strcpy(iter->fmt, "%s");
3839 strncpy(iter->fmt, p + i, j + 1);
3840 iter->fmt[j+1] = '\0';
3843 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3845 trace_seq_printf(&iter->seq, iter->fmt, str);
3851 trace_seq_vprintf(&iter->seq, p, ap);
3854 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3856 const char *p, *new_fmt;
3859 if (WARN_ON_ONCE(!fmt))
3862 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3866 new_fmt = q = iter->fmt;
3868 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3869 if (!trace_iter_expand_format(iter))
3872 q += iter->fmt - new_fmt;
3873 new_fmt = iter->fmt;
3878 /* Replace %p with %px */
3882 } else if (p[0] == 'p' && !isalnum(p[1])) {
3893 #define STATIC_TEMP_BUF_SIZE 128
3894 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3896 /* Find the next real entry, without updating the iterator itself */
3897 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3898 int *ent_cpu, u64 *ent_ts)
3900 /* __find_next_entry will reset ent_size */
3901 int ent_size = iter->ent_size;
3902 struct trace_entry *entry;
3905 * If called from ftrace_dump(), then the iter->temp buffer
3906 * will be the static_temp_buf and not created from kmalloc.
3907 * If the entry size is greater than the buffer, we can
3908 * not save it. Just return NULL in that case. This is only
3909 * used to add markers when two consecutive events' time
3910 * stamps have a large delta. See trace_print_lat_context().
3912 if (iter->temp == static_temp_buf &&
3913 STATIC_TEMP_BUF_SIZE < ent_size)
3917 * The __find_next_entry() may call peek_next_entry(), which may
3918 * call ring_buffer_peek() that may make the contents of iter->ent
3919 * undefined. Need to copy iter->ent now.
3921 if (iter->ent && iter->ent != iter->temp) {
3922 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3923 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3925 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3930 iter->temp_size = iter->ent_size;
3932 memcpy(iter->temp, iter->ent, iter->ent_size);
3933 iter->ent = iter->temp;
3935 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3936 /* Put back the original ent_size */
3937 iter->ent_size = ent_size;
3942 /* Find the next real entry, and increment the iterator to the next entry */
3943 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3945 iter->ent = __find_next_entry(iter, &iter->cpu,
3946 &iter->lost_events, &iter->ts);
3949 trace_iterator_increment(iter);
3951 return iter->ent ? iter : NULL;
3954 static void trace_consume(struct trace_iterator *iter)
3956 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3957 &iter->lost_events);
3960 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3962 struct trace_iterator *iter = m->private;
3966 WARN_ON_ONCE(iter->leftover);
3970 /* can't go backwards */
3975 ent = trace_find_next_entry_inc(iter);
3979 while (ent && iter->idx < i)
3980 ent = trace_find_next_entry_inc(iter);
3987 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3989 struct ring_buffer_iter *buf_iter;
3990 unsigned long entries = 0;
3993 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3995 buf_iter = trace_buffer_iter(iter, cpu);
3999 ring_buffer_iter_reset(buf_iter);
4002 * We could have the case with the max latency tracers
4003 * that a reset never took place on a cpu. This is evident
4004 * by the timestamp being before the start of the buffer.
4006 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4007 if (ts >= iter->array_buffer->time_start)
4010 ring_buffer_iter_advance(buf_iter);
4013 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4017 * The current tracer is copied to avoid using a global lock all around.
4020 static void *s_start(struct seq_file *m, loff_t *pos)
4022 struct trace_iterator *iter = m->private;
4023 struct trace_array *tr = iter->tr;
4024 int cpu_file = iter->cpu_file;
4030 * copy the tracer to avoid using a global lock all around.
4031 * iter->trace is a copy of current_trace, the pointer to the
4032 * name may be used instead of a strcmp(), as iter->trace->name
4033 * will point to the same string as current_trace->name.
4035 mutex_lock(&trace_types_lock);
4036 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4037 *iter->trace = *tr->current_trace;
4038 mutex_unlock(&trace_types_lock);
4040 #ifdef CONFIG_TRACER_MAX_TRACE
4041 if (iter->snapshot && iter->trace->use_max_tr)
4042 return ERR_PTR(-EBUSY);
4045 if (!iter->snapshot)
4046 atomic_inc(&trace_record_taskinfo_disabled);
4048 if (*pos != iter->pos) {
4053 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4054 for_each_tracing_cpu(cpu)
4055 tracing_iter_reset(iter, cpu);
4057 tracing_iter_reset(iter, cpu_file);
4060 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4065 * If we overflowed the seq_file before, then we want
4066 * to just reuse the trace_seq buffer again.
4072 p = s_next(m, p, &l);
4076 trace_event_read_lock();
4077 trace_access_lock(cpu_file);
4081 static void s_stop(struct seq_file *m, void *p)
4083 struct trace_iterator *iter = m->private;
4085 #ifdef CONFIG_TRACER_MAX_TRACE
4086 if (iter->snapshot && iter->trace->use_max_tr)
4090 if (!iter->snapshot)
4091 atomic_dec(&trace_record_taskinfo_disabled);
4093 trace_access_unlock(iter->cpu_file);
4094 trace_event_read_unlock();
4098 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4099 unsigned long *entries, int cpu)
4101 unsigned long count;
4103 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4105 * If this buffer has skipped entries, then we hold all
4106 * entries for the trace and we need to ignore the
4107 * ones before the time stamp.
4109 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4110 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4111 /* total is the same as the entries */
4115 ring_buffer_overrun_cpu(buf->buffer, cpu);
4120 get_total_entries(struct array_buffer *buf,
4121 unsigned long *total, unsigned long *entries)
4129 for_each_tracing_cpu(cpu) {
4130 get_total_entries_cpu(buf, &t, &e, cpu);
4136 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4138 unsigned long total, entries;
4143 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4148 unsigned long trace_total_entries(struct trace_array *tr)
4150 unsigned long total, entries;
4155 get_total_entries(&tr->array_buffer, &total, &entries);
4160 static void print_lat_help_header(struct seq_file *m)
4162 seq_puts(m, "# _------=> CPU# \n"
4163 "# / _-----=> irqs-off \n"
4164 "# | / _----=> need-resched \n"
4165 "# || / _---=> hardirq/softirq \n"
4166 "# ||| / _--=> preempt-depth \n"
4168 "# cmd pid ||||| time | caller \n"
4169 "# \\ / ||||| \\ | / \n");
4172 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4174 unsigned long total;
4175 unsigned long entries;
4177 get_total_entries(buf, &total, &entries);
4178 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4179 entries, total, num_online_cpus());
4183 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4186 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4188 print_event_info(buf, m);
4190 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4191 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4194 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4197 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4198 const char *space = " ";
4199 int prec = tgid ? 12 : 2;
4201 print_event_info(buf, m);
4203 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4204 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4205 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4206 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4207 seq_printf(m, "# %.*s||| / delay\n", prec, space);
4208 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4209 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
4213 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4215 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4216 struct array_buffer *buf = iter->array_buffer;
4217 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4218 struct tracer *type = iter->trace;
4219 unsigned long entries;
4220 unsigned long total;
4221 const char *name = "preemption";
4225 get_total_entries(buf, &total, &entries);
4227 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4229 seq_puts(m, "# -----------------------------------"
4230 "---------------------------------\n");
4231 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4232 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4233 nsecs_to_usecs(data->saved_latency),
4237 #if defined(CONFIG_PREEMPT_NONE)
4239 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4241 #elif defined(CONFIG_PREEMPT)
4243 #elif defined(CONFIG_PREEMPT_RT)
4248 /* These are reserved for later use */
4251 seq_printf(m, " #P:%d)\n", num_online_cpus());
4255 seq_puts(m, "# -----------------\n");
4256 seq_printf(m, "# | task: %.16s-%d "
4257 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4258 data->comm, data->pid,
4259 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4260 data->policy, data->rt_priority);
4261 seq_puts(m, "# -----------------\n");
4263 if (data->critical_start) {
4264 seq_puts(m, "# => started at: ");
4265 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4266 trace_print_seq(m, &iter->seq);
4267 seq_puts(m, "\n# => ended at: ");
4268 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4269 trace_print_seq(m, &iter->seq);
4270 seq_puts(m, "\n#\n");
4276 static void test_cpu_buff_start(struct trace_iterator *iter)
4278 struct trace_seq *s = &iter->seq;
4279 struct trace_array *tr = iter->tr;
4281 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4284 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4287 if (cpumask_available(iter->started) &&
4288 cpumask_test_cpu(iter->cpu, iter->started))
4291 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4294 if (cpumask_available(iter->started))
4295 cpumask_set_cpu(iter->cpu, iter->started);
4297 /* Don't print started cpu buffer for the first entry of the trace */
4299 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4303 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4305 struct trace_array *tr = iter->tr;
4306 struct trace_seq *s = &iter->seq;
4307 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4308 struct trace_entry *entry;
4309 struct trace_event *event;
4313 test_cpu_buff_start(iter);
4315 event = ftrace_find_event(entry->type);
4317 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4318 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4319 trace_print_lat_context(iter);
4321 trace_print_context(iter);
4324 if (trace_seq_has_overflowed(s))
4325 return TRACE_TYPE_PARTIAL_LINE;
4328 return event->funcs->trace(iter, sym_flags, event);
4330 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4332 return trace_handle_return(s);
4335 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4337 struct trace_array *tr = iter->tr;
4338 struct trace_seq *s = &iter->seq;
4339 struct trace_entry *entry;
4340 struct trace_event *event;
4344 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4345 trace_seq_printf(s, "%d %d %llu ",
4346 entry->pid, iter->cpu, iter->ts);
4348 if (trace_seq_has_overflowed(s))
4349 return TRACE_TYPE_PARTIAL_LINE;
4351 event = ftrace_find_event(entry->type);
4353 return event->funcs->raw(iter, 0, event);
4355 trace_seq_printf(s, "%d ?\n", entry->type);
4357 return trace_handle_return(s);
4360 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4362 struct trace_array *tr = iter->tr;
4363 struct trace_seq *s = &iter->seq;
4364 unsigned char newline = '\n';
4365 struct trace_entry *entry;
4366 struct trace_event *event;
4370 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4371 SEQ_PUT_HEX_FIELD(s, entry->pid);
4372 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4373 SEQ_PUT_HEX_FIELD(s, iter->ts);
4374 if (trace_seq_has_overflowed(s))
4375 return TRACE_TYPE_PARTIAL_LINE;
4378 event = ftrace_find_event(entry->type);
4380 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4381 if (ret != TRACE_TYPE_HANDLED)
4385 SEQ_PUT_FIELD(s, newline);
4387 return trace_handle_return(s);
4390 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4392 struct trace_array *tr = iter->tr;
4393 struct trace_seq *s = &iter->seq;
4394 struct trace_entry *entry;
4395 struct trace_event *event;
4399 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4400 SEQ_PUT_FIELD(s, entry->pid);
4401 SEQ_PUT_FIELD(s, iter->cpu);
4402 SEQ_PUT_FIELD(s, iter->ts);
4403 if (trace_seq_has_overflowed(s))
4404 return TRACE_TYPE_PARTIAL_LINE;
4407 event = ftrace_find_event(entry->type);
4408 return event ? event->funcs->binary(iter, 0, event) :
4412 int trace_empty(struct trace_iterator *iter)
4414 struct ring_buffer_iter *buf_iter;
4417 /* If we are looking at one CPU buffer, only check that one */
4418 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4419 cpu = iter->cpu_file;
4420 buf_iter = trace_buffer_iter(iter, cpu);
4422 if (!ring_buffer_iter_empty(buf_iter))
4425 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4431 for_each_tracing_cpu(cpu) {
4432 buf_iter = trace_buffer_iter(iter, cpu);
4434 if (!ring_buffer_iter_empty(buf_iter))
4437 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4445 /* Called with trace_event_read_lock() held. */
4446 enum print_line_t print_trace_line(struct trace_iterator *iter)
4448 struct trace_array *tr = iter->tr;
4449 unsigned long trace_flags = tr->trace_flags;
4450 enum print_line_t ret;
4452 if (iter->lost_events) {
4453 if (iter->lost_events == (unsigned long)-1)
4454 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4457 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4458 iter->cpu, iter->lost_events);
4459 if (trace_seq_has_overflowed(&iter->seq))
4460 return TRACE_TYPE_PARTIAL_LINE;
4463 if (iter->trace && iter->trace->print_line) {
4464 ret = iter->trace->print_line(iter);
4465 if (ret != TRACE_TYPE_UNHANDLED)
4469 if (iter->ent->type == TRACE_BPUTS &&
4470 trace_flags & TRACE_ITER_PRINTK &&
4471 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4472 return trace_print_bputs_msg_only(iter);
4474 if (iter->ent->type == TRACE_BPRINT &&
4475 trace_flags & TRACE_ITER_PRINTK &&
4476 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4477 return trace_print_bprintk_msg_only(iter);
4479 if (iter->ent->type == TRACE_PRINT &&
4480 trace_flags & TRACE_ITER_PRINTK &&
4481 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4482 return trace_print_printk_msg_only(iter);
4484 if (trace_flags & TRACE_ITER_BIN)
4485 return print_bin_fmt(iter);
4487 if (trace_flags & TRACE_ITER_HEX)
4488 return print_hex_fmt(iter);
4490 if (trace_flags & TRACE_ITER_RAW)
4491 return print_raw_fmt(iter);
4493 return print_trace_fmt(iter);
4496 void trace_latency_header(struct seq_file *m)
4498 struct trace_iterator *iter = m->private;
4499 struct trace_array *tr = iter->tr;
4501 /* print nothing if the buffers are empty */
4502 if (trace_empty(iter))
4505 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4506 print_trace_header(m, iter);
4508 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4509 print_lat_help_header(m);
4512 void trace_default_header(struct seq_file *m)
4514 struct trace_iterator *iter = m->private;
4515 struct trace_array *tr = iter->tr;
4516 unsigned long trace_flags = tr->trace_flags;
4518 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4521 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4522 /* print nothing if the buffers are empty */
4523 if (trace_empty(iter))
4525 print_trace_header(m, iter);
4526 if (!(trace_flags & TRACE_ITER_VERBOSE))
4527 print_lat_help_header(m);
4529 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4530 if (trace_flags & TRACE_ITER_IRQ_INFO)
4531 print_func_help_header_irq(iter->array_buffer,
4534 print_func_help_header(iter->array_buffer, m,
4540 static void test_ftrace_alive(struct seq_file *m)
4542 if (!ftrace_is_dead())
4544 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4545 "# MAY BE MISSING FUNCTION EVENTS\n");
4548 #ifdef CONFIG_TRACER_MAX_TRACE
4549 static void show_snapshot_main_help(struct seq_file *m)
4551 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4552 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4553 "# Takes a snapshot of the main buffer.\n"
4554 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4555 "# (Doesn't have to be '2' works with any number that\n"
4556 "# is not a '0' or '1')\n");
4559 static void show_snapshot_percpu_help(struct seq_file *m)
4561 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4562 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4563 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4564 "# Takes a snapshot of the main buffer for this cpu.\n");
4566 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4567 "# Must use main snapshot file to allocate.\n");
4569 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4570 "# (Doesn't have to be '2' works with any number that\n"
4571 "# is not a '0' or '1')\n");
4574 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4576 if (iter->tr->allocated_snapshot)
4577 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4579 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4581 seq_puts(m, "# Snapshot commands:\n");
4582 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4583 show_snapshot_main_help(m);
4585 show_snapshot_percpu_help(m);
4588 /* Should never be called */
4589 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4592 static int s_show(struct seq_file *m, void *v)
4594 struct trace_iterator *iter = v;
4597 if (iter->ent == NULL) {
4599 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4601 test_ftrace_alive(m);
4603 if (iter->snapshot && trace_empty(iter))
4604 print_snapshot_help(m, iter);
4605 else if (iter->trace && iter->trace->print_header)
4606 iter->trace->print_header(m);
4608 trace_default_header(m);
4610 } else if (iter->leftover) {
4612 * If we filled the seq_file buffer earlier, we
4613 * want to just show it now.
4615 ret = trace_print_seq(m, &iter->seq);
4617 /* ret should this time be zero, but you never know */
4618 iter->leftover = ret;
4621 print_trace_line(iter);
4622 ret = trace_print_seq(m, &iter->seq);
4624 * If we overflow the seq_file buffer, then it will
4625 * ask us for this data again at start up.
4627 * ret is 0 if seq_file write succeeded.
4630 iter->leftover = ret;
4637 * Should be used after trace_array_get(), trace_types_lock
4638 * ensures that i_cdev was already initialized.
4640 static inline int tracing_get_cpu(struct inode *inode)
4642 if (inode->i_cdev) /* See trace_create_cpu_file() */
4643 return (long)inode->i_cdev - 1;
4644 return RING_BUFFER_ALL_CPUS;
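/*
 * Illustrative sketch of the encoding undone above: per-cpu file creation
 * stores cpu + 1 in i_cdev so that a NULL i_cdev can mean "all CPUs":
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);    (at file creation)
 *	cpu = (long)inode->i_cdev - 1;              (here, at open)
 */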
4647 static const struct seq_operations tracer_seq_ops = {
4654 static struct trace_iterator *
4655 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4657 struct trace_array *tr = inode->i_private;
4658 struct trace_iterator *iter;
4661 if (tracing_disabled)
4662 return ERR_PTR(-ENODEV);
4664 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4666 return ERR_PTR(-ENOMEM);
4668 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4670 if (!iter->buffer_iter)
4674 * trace_find_next_entry() may need to save off iter->ent.
4675 * It will place it into the iter->temp buffer. As most
4676 * events are less than 128, allocate a buffer of that size.
4677 * If one is greater, then trace_find_next_entry() will
4678 * allocate a new buffer to adjust for the bigger iter->ent.
4679 * It's not critical if it fails to get allocated here.
4681 iter->temp = kmalloc(128, GFP_KERNEL);
4683 iter->temp_size = 128;
4686 * trace_event_printf() may need to modify given format
4687 * string to replace %p with %px so that it shows real address
4688 * instead of hash value. However, that is only for the event
4689 * tracing; other tracers may not need it. Defer the allocation
4690 * until it is needed.
4696 * We make a copy of the current tracer to avoid concurrent
4697 * changes on it while we are reading.
4699 mutex_lock(&trace_types_lock);
4700 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4704 *iter->trace = *tr->current_trace;
4706 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4711 #ifdef CONFIG_TRACER_MAX_TRACE
4712 /* Currently only the top directory has a snapshot */
4713 if (tr->current_trace->print_max || snapshot)
4714 iter->array_buffer = &tr->max_buffer;
4717 iter->array_buffer = &tr->array_buffer;
4718 iter->snapshot = snapshot;
4720 iter->cpu_file = tracing_get_cpu(inode);
4721 mutex_init(&iter->mutex);
4723 /* Notify the tracer early, before we stop tracing. */
4724 if (iter->trace->open)
4725 iter->trace->open(iter);
4727 /* Annotate start of buffers if we had overruns */
4728 if (ring_buffer_overruns(iter->array_buffer->buffer))
4729 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4731 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4732 if (trace_clocks[tr->clock_id].in_ns)
4733 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4736 * If pause-on-trace is enabled, then stop the trace while
4737 * dumping, unless this is the "snapshot" file.
4739 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4740 tracing_stop_tr(tr);
4742 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4743 for_each_tracing_cpu(cpu) {
4744 iter->buffer_iter[cpu] =
4745 ring_buffer_read_prepare(iter->array_buffer->buffer,
4748 ring_buffer_read_prepare_sync();
4749 for_each_tracing_cpu(cpu) {
4750 ring_buffer_read_start(iter->buffer_iter[cpu]);
4751 tracing_iter_reset(iter, cpu);
4754 cpu = iter->cpu_file;
4755 iter->buffer_iter[cpu] =
4756 ring_buffer_read_prepare(iter->array_buffer->buffer,
4758 ring_buffer_read_prepare_sync();
4759 ring_buffer_read_start(iter->buffer_iter[cpu]);
4760 tracing_iter_reset(iter, cpu);
4763 mutex_unlock(&trace_types_lock);
4768 mutex_unlock(&trace_types_lock);
4771 kfree(iter->buffer_iter);
4773 seq_release_private(inode, file);
4774 return ERR_PTR(-ENOMEM);
4777 int tracing_open_generic(struct inode *inode, struct file *filp)
4781 ret = tracing_check_open_get_tr(NULL);
4785 filp->private_data = inode->i_private;
4789 bool tracing_is_disabled(void)
4791 return tracing_disabled;
4795 * Open and update trace_array ref count.
4796 * Must have the current trace_array passed to it.
4798 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4800 struct trace_array *tr = inode->i_private;
4803 ret = tracing_check_open_get_tr(tr);
4807 filp->private_data = inode->i_private;
4812 static int tracing_release(struct inode *inode, struct file *file)
4814 struct trace_array *tr = inode->i_private;
4815 struct seq_file *m = file->private_data;
4816 struct trace_iterator *iter;
4819 if (!(file->f_mode & FMODE_READ)) {
4820 trace_array_put(tr);
4824 /* Writes do not use seq_file */
4826 mutex_lock(&trace_types_lock);
4828 for_each_tracing_cpu(cpu) {
4829 if (iter->buffer_iter[cpu])
4830 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4833 if (iter->trace && iter->trace->close)
4834 iter->trace->close(iter);
4836 if (!iter->snapshot && tr->stop_count)
4837 /* reenable tracing if it was previously enabled */
4838 tracing_start_tr(tr);
4840 __trace_array_put(tr);
4842 mutex_unlock(&trace_types_lock);
4844 mutex_destroy(&iter->mutex);
4845 free_cpumask_var(iter->started);
4849 kfree(iter->buffer_iter);
4850 seq_release_private(inode, file);
4855 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4857 struct trace_array *tr = inode->i_private;
4859 trace_array_put(tr);
4863 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4865 struct trace_array *tr = inode->i_private;
4867 trace_array_put(tr);
4869 return single_release(inode, file);
4872 static int tracing_open(struct inode *inode, struct file *file)
4874 struct trace_array *tr = inode->i_private;
4875 struct trace_iterator *iter;
4878 ret = tracing_check_open_get_tr(tr);
4882 /* If this file was open for write, then erase contents */
4883 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4884 int cpu = tracing_get_cpu(inode);
4885 struct array_buffer *trace_buf = &tr->array_buffer;
4887 #ifdef CONFIG_TRACER_MAX_TRACE
4888 if (tr->current_trace->print_max)
4889 trace_buf = &tr->max_buffer;
4892 if (cpu == RING_BUFFER_ALL_CPUS)
4893 tracing_reset_online_cpus(trace_buf);
4895 tracing_reset_cpu(trace_buf, cpu);
4898 if (file->f_mode & FMODE_READ) {
4899 iter = __tracing_open(inode, file, false);
4901 ret = PTR_ERR(iter);
4902 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4903 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4907 trace_array_put(tr);
4913 * Some tracers are not suitable for instance buffers.
4914 * A tracer is always available for the global array (toplevel)
4915 * or if it explicitly states that it is.
4918 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4920 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4923 /* Find the next tracer that this trace array may use */
4924 static struct tracer *
4925 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4927 while (t && !trace_ok_for_array(t, tr))
4934 t_next(struct seq_file *m, void *v, loff_t *pos)
4936 struct trace_array *tr = m->private;
4937 struct tracer *t = v;
4942 t = get_tracer_for_array(tr, t->next);
4947 static void *t_start(struct seq_file *m, loff_t *pos)
4949 struct trace_array *tr = m->private;
4953 mutex_lock(&trace_types_lock);
4955 t = get_tracer_for_array(tr, trace_types);
4956 for (; t && l < *pos; t = t_next(m, t, &l))
4962 static void t_stop(struct seq_file *m, void *p)
4964 mutex_unlock(&trace_types_lock);
4967 static int t_show(struct seq_file *m, void *v)
4969 struct tracer *t = v;
4974 seq_puts(m, t->name);
4983 static const struct seq_operations show_traces_seq_ops = {
4990 static int show_traces_open(struct inode *inode, struct file *file)
4992 struct trace_array *tr = inode->i_private;
4996 ret = tracing_check_open_get_tr(tr);
5000 ret = seq_open(file, &show_traces_seq_ops);
5002 trace_array_put(tr);
5006 m = file->private_data;
5012 static int show_traces_release(struct inode *inode, struct file *file)
5014 struct trace_array *tr = inode->i_private;
5016 trace_array_put(tr);
5017 return seq_release(inode, file);
5021 tracing_write_stub(struct file *filp, const char __user *ubuf,
5022 size_t count, loff_t *ppos)
5027 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5031 if (file->f_mode & FMODE_READ)
5032 ret = seq_lseek(file, offset, whence);
5034 file->f_pos = ret = 0;
5039 static const struct file_operations tracing_fops = {
5040 .open = tracing_open,
5042 .write = tracing_write_stub,
5043 .llseek = tracing_lseek,
5044 .release = tracing_release,
5047 static const struct file_operations show_traces_fops = {
5048 .open = show_traces_open,
5050 .llseek = seq_lseek,
5051 .release = show_traces_release,
5055 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5056 size_t count, loff_t *ppos)
5058 struct trace_array *tr = file_inode(filp)->i_private;
5062 len = snprintf(NULL, 0, "%*pb\n",
5063 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5064 mask_str = kmalloc(len, GFP_KERNEL);
5068 len = snprintf(mask_str, len, "%*pb\n",
5069 cpumask_pr_args(tr->tracing_cpumask));
5074 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5082 int tracing_set_cpumask(struct trace_array *tr,
5083 cpumask_var_t tracing_cpumask_new)
5090 local_irq_disable();
5091 arch_spin_lock(&tr->max_lock);
5092 for_each_tracing_cpu(cpu) {
5094 * Increase/decrease the disabled counter if we are
5095 * about to flip a bit in the cpumask:
5097 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5098 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5099 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5100 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5102 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5103 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5104 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5105 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5108 arch_spin_unlock(&tr->max_lock);
5111 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
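/*
 * Usage sketch (editor's addition): user space reaches this through the
 * tracing_cpumask file with a standard hex cpumask, e.g.
 *
 *	# echo 3 > tracing_cpumask
 *
 * limits tracing to CPUs 0-1; recording is disabled on CPUs cleared from
 * the mask and re-enabled on CPUs newly set, as done above.
 */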
5117 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5118 size_t count, loff_t *ppos)
5120 struct trace_array *tr = file_inode(filp)->i_private;
5121 cpumask_var_t tracing_cpumask_new;
5124 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5127 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5131 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5135 free_cpumask_var(tracing_cpumask_new);
5140 free_cpumask_var(tracing_cpumask_new);
5145 static const struct file_operations tracing_cpumask_fops = {
5146 .open = tracing_open_generic_tr,
5147 .read = tracing_cpumask_read,
5148 .write = tracing_cpumask_write,
5149 .release = tracing_release_generic_tr,
5150 .llseek = generic_file_llseek,
5153 static int tracing_trace_options_show(struct seq_file *m, void *v)
5155 struct tracer_opt *trace_opts;
5156 struct trace_array *tr = m->private;
5160 mutex_lock(&trace_types_lock);
5161 tracer_flags = tr->current_trace->flags->val;
5162 trace_opts = tr->current_trace->flags->opts;
5164 for (i = 0; trace_options[i]; i++) {
5165 if (tr->trace_flags & (1 << i))
5166 seq_printf(m, "%s\n", trace_options[i]);
5168 seq_printf(m, "no%s\n", trace_options[i]);
5171 for (i = 0; trace_opts[i].name; i++) {
5172 if (tracer_flags & trace_opts[i].bit)
5173 seq_printf(m, "%s\n", trace_opts[i].name);
5175 seq_printf(m, "no%s\n", trace_opts[i].name);
5177 mutex_unlock(&trace_types_lock);
5182 static int __set_tracer_option(struct trace_array *tr,
5183 struct tracer_flags *tracer_flags,
5184 struct tracer_opt *opts, int neg)
5186 struct tracer *trace = tracer_flags->trace;
5189 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5194 tracer_flags->val &= ~opts->bit;
5196 tracer_flags->val |= opts->bit;
5200 /* Try to assign a tracer specific option */
5201 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5203 struct tracer *trace = tr->current_trace;
5204 struct tracer_flags *tracer_flags = trace->flags;
5205 struct tracer_opt *opts = NULL;
5208 for (i = 0; tracer_flags->opts[i].name; i++) {
5209 opts = &tracer_flags->opts[i];
5211 if (strcmp(cmp, opts->name) == 0)
5212 return __set_tracer_option(tr, trace->flags, opts, neg);
5218 /* Some tracers require overwrite to stay enabled */
5219 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5221 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5227 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5229 if ((mask == TRACE_ITER_RECORD_TGID) ||
5230 (mask == TRACE_ITER_RECORD_CMD))
5231 lockdep_assert_held(&event_mutex);
5233 /* do nothing if flag is already set */
5234 if (!!(tr->trace_flags & mask) == !!enabled)
5237 /* Give the tracer a chance to approve the change */
5238 if (tr->current_trace->flag_changed)
5239 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5243 tr->trace_flags |= mask;
5245 tr->trace_flags &= ~mask;
5247 if (mask == TRACE_ITER_RECORD_CMD)
5248 trace_event_enable_cmd_record(enabled);
5250 if (mask == TRACE_ITER_RECORD_TGID) {
5252 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
5256 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5260 trace_event_enable_tgid_record(enabled);
5263 if (mask == TRACE_ITER_EVENT_FORK)
5264 trace_event_follow_fork(tr, enabled);
5266 if (mask == TRACE_ITER_FUNC_FORK)
5267 ftrace_pid_follow_fork(tr, enabled);
5269 if (mask == TRACE_ITER_OVERWRITE) {
5270 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5271 #ifdef CONFIG_TRACER_MAX_TRACE
5272 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5276 if (mask == TRACE_ITER_PRINTK) {
5277 trace_printk_start_stop_comm(enabled);
5278 trace_printk_control(enabled);
5284 int trace_set_options(struct trace_array *tr, char *option)
5289 size_t orig_len = strlen(option);
5292 cmp = strstrip(option);
5294 len = str_has_prefix(cmp, "no");
5300 mutex_lock(&event_mutex);
5301 mutex_lock(&trace_types_lock);
5303 ret = match_string(trace_options, -1, cmp);
5304 /* If no option could be set, test the specific tracer options */
5306 ret = set_tracer_option(tr, cmp, neg);
5308 ret = set_tracer_flag(tr, 1 << ret, !neg);
5310 mutex_unlock(&trace_types_lock);
5311 mutex_unlock(&event_mutex);
5314 * If the first trailing whitespace is replaced with '\0' by strstrip,
5315 * turn it back into a space.
5317 if (orig_len > strlen(option))
5318 option[strlen(option)] = ' ';
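/*
 * Example (editor's sketch, not in the original source): the trace_options
 * file funnels into this function, so
 *
 *	# echo noprint-parent > trace_options
 *
 * strips the "no" prefix here and, assuming "print-parent" is the matching
 * trace_options[] entry, ends up as
 * set_tracer_flag(tr, TRACE_ITER_PRINT_PARENT, 0).
 */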
5323 static void __init apply_trace_boot_options(void)
5325 char *buf = trace_boot_options_buf;
5329 option = strsep(&buf, ",");
5335 trace_set_options(&global_trace, option);
5337 /* Put back the comma to allow this to be called again */
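/*
 * Example (editor's sketch, not in the original source): with a kernel
 * command line such as
 *
 *	trace_options=sym-offset,nostacktrace
 *
 * trace_boot_options_buf holds that string, and strsep() above feeds each
 * comma-separated token to trace_set_options() in turn.
 */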
5344 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5345 size_t cnt, loff_t *ppos)
5347 struct seq_file *m = filp->private_data;
5348 struct trace_array *tr = m->private;
5352 if (cnt >= sizeof(buf))
5355 if (copy_from_user(buf, ubuf, cnt))
5360 ret = trace_set_options(tr, buf);
5369 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5371 struct trace_array *tr = inode->i_private;
5374 ret = tracing_check_open_get_tr(tr);
5378 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5380 trace_array_put(tr);
5385 static const struct file_operations tracing_iter_fops = {
5386 .open = tracing_trace_options_open,
5388 .llseek = seq_lseek,
5389 .release = tracing_single_release_tr,
5390 .write = tracing_trace_options_write,
5393 static const char readme_msg[] =
5394 "tracing mini-HOWTO:\n\n"
5395 "# echo 0 > tracing_on : quick way to disable tracing\n"
5396 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5397 " Important files:\n"
5398 " trace\t\t\t- The static contents of the buffer\n"
5399 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5400 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5401 " current_tracer\t- function and latency tracers\n"
5402 " available_tracers\t- list of configured tracers for current_tracer\n"
5403 " error_log\t- error log for failed commands (that support it)\n"
5404 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5405 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5406 " trace_clock\t\t-change the clock used to order events\n"
5407 " local: Per cpu clock but may not be synced across CPUs\n"
5408 " global: Synced across CPUs but slows tracing down.\n"
5409 " counter: Not a clock, but just an increment\n"
5410 " uptime: Jiffy counter from time of boot\n"
5411 " perf: Same clock that perf events use\n"
5412 #ifdef CONFIG_X86_64
5413 " x86-tsc: TSC cycle counter\n"
5415 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5416 " delta: Delta difference against a buffer-wide timestamp\n"
5417 " absolute: Absolute (standalone) timestamp\n"
5418 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5419 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5420 " tracing_cpumask\t- Limit which CPUs to trace\n"
5421 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5422 "\t\t\t Remove sub-buffer with rmdir\n"
5423 " trace_options\t\t- Set format or modify how tracing happens\n"
5424 "\t\t\t Disable an option by prefixing 'no' to the\n"
5425 "\t\t\t option name\n"
5426 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5427 #ifdef CONFIG_DYNAMIC_FTRACE
5428 "\n available_filter_functions - list of functions that can be filtered on\n"
5429 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5430 "\t\t\t functions\n"
5431 "\t accepts: func_full_name or glob-matching-pattern\n"
5432 "\t modules: Can select a group via module\n"
5433 "\t Format: :mod:<module-name>\n"
5434 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5435 "\t triggers: a command to perform when function is hit\n"
5436 "\t Format: <function>:<trigger>[:count]\n"
5437 "\t trigger: traceon, traceoff\n"
5438 "\t\t enable_event:<system>:<event>\n"
5439 "\t\t disable_event:<system>:<event>\n"
5440 #ifdef CONFIG_STACKTRACE
5443 #ifdef CONFIG_TRACER_SNAPSHOT
5448 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5449 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5450 "\t The first one will disable tracing every time do_fault is hit\n"
5451 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5452 "\t The first time do trap is hit and it disables tracing, the\n"
5453 "\t counter will decrement to 2. If tracing is already disabled,\n"
5454 "\t the counter will not decrement. It only decrements when the\n"
5455 "\t trigger did work\n"
5456 "\t To remove trigger without count:\n"
5457 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5458 "\t To remove trigger with a count:\n"
5459 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5460 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5461 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5462 "\t modules: Can select a group via module command :mod:\n"
5463 "\t Does not accept triggers\n"
5464 #endif /* CONFIG_DYNAMIC_FTRACE */
5465 #ifdef CONFIG_FUNCTION_TRACER
5466 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5468 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5471 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5472 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5473 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5474 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5476 #ifdef CONFIG_TRACER_SNAPSHOT
5477 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5478 "\t\t\t snapshot buffer. Read the contents for more\n"
5479 "\t\t\t information\n"
5481 #ifdef CONFIG_STACK_TRACER
5482 " stack_trace\t\t- Shows the max stack trace when active\n"
5483 " stack_max_size\t- Shows current max stack size that was traced\n"
5484 "\t\t\t Write into this file to reset the max size (trigger a\n"
5485 "\t\t\t new trace)\n"
5486 #ifdef CONFIG_DYNAMIC_FTRACE
5487 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5490 #endif /* CONFIG_STACK_TRACER */
5491 #ifdef CONFIG_DYNAMIC_EVENTS
5492 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5493 "\t\t\t Write into this file to define/undefine new trace events.\n"
5495 #ifdef CONFIG_KPROBE_EVENTS
5496 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5497 "\t\t\t Write into this file to define/undefine new trace events.\n"
5499 #ifdef CONFIG_UPROBE_EVENTS
5500 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5501 "\t\t\t Write into this file to define/undefine new trace events.\n"
5503 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5504 "\t accepts: event-definitions (one definition per line)\n"
5505 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5506 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5507 #ifdef CONFIG_HIST_TRIGGERS
5508 "\t s:[synthetic/]<event> <field> [<field>]\n"
5510 "\t -:[<group>/]<event>\n"
5511 #ifdef CONFIG_KPROBE_EVENTS
5512 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5513 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5515 #ifdef CONFIG_UPROBE_EVENTS
5516 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5518 "\t args: <name>=fetcharg[:type]\n"
5519 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5520 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5521 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5523 "\t $stack<index>, $stack, $retval, $comm,\n"
5525 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5526 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5527 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5528 "\t <type>\\[<array-size>\\]\n"
5529 #ifdef CONFIG_HIST_TRIGGERS
5530 "\t field: <stype> <name>;\n"
5531 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5532 "\t [unsigned] char/int/long\n"
5535 " events/\t\t- Directory containing all trace event subsystems:\n"
5536 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5537 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5538 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5540 " filter\t\t- If set, only events passing filter are traced\n"
5541 " events/<system>/<event>/\t- Directory containing control files for\n"
5543 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5544 " filter\t\t- If set, only events passing filter are traced\n"
5545 " trigger\t\t- If set, a command to perform when event is hit\n"
5546 "\t Format: <trigger>[:count][if <filter>]\n"
5547 "\t trigger: traceon, traceoff\n"
5548 "\t enable_event:<system>:<event>\n"
5549 "\t disable_event:<system>:<event>\n"
5550 #ifdef CONFIG_HIST_TRIGGERS
5551 "\t enable_hist:<system>:<event>\n"
5552 "\t disable_hist:<system>:<event>\n"
5554 #ifdef CONFIG_STACKTRACE
5557 #ifdef CONFIG_TRACER_SNAPSHOT
5560 #ifdef CONFIG_HIST_TRIGGERS
5561 "\t\t hist (see below)\n"
5563 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5564 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5565 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5566 "\t events/block/block_unplug/trigger\n"
5567 "\t The first disables tracing every time block_unplug is hit.\n"
5568 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5569 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5570 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5571 "\t Like function triggers, the counter is only decremented if it\n"
5572 "\t enabled or disabled tracing.\n"
5573 "\t To remove a trigger without a count:\n"
5574 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5575 "\t To remove a trigger with a count:\n"
5576 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5577 "\t Filters can be ignored when removing a trigger.\n"
5578 #ifdef CONFIG_HIST_TRIGGERS
5579 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5580 "\t Format: hist:keys=<field1[,field2,...]>\n"
5581 "\t [:values=<field1[,field2,...]>]\n"
5582 "\t [:sort=<field1[,field2,...]>]\n"
5583 "\t [:size=#entries]\n"
5584 "\t [:pause][:continue][:clear]\n"
5585 "\t [:name=histname1]\n"
5586 "\t [:<handler>.<action>]\n"
5587 "\t [if <filter>]\n\n"
5588 "\t When a matching event is hit, an entry is added to a hash\n"
5589 "\t table using the key(s) and value(s) named, and the value of a\n"
5590 "\t sum called 'hitcount' is incremented. Keys and values\n"
5591 "\t correspond to fields in the event's format description. Keys\n"
5592 "\t can be any field, or the special string 'stacktrace'.\n"
5593 "\t Compound keys consisting of up to two fields can be specified\n"
5594 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5595 "\t fields. Sort keys consisting of up to two fields can be\n"
5596 "\t specified using the 'sort' keyword. The sort direction can\n"
5597 "\t be modified by appending '.descending' or '.ascending' to a\n"
5598 "\t sort field. The 'size' parameter can be used to specify more\n"
5599 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5600 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5601 "\t its histogram data will be shared with other triggers of the\n"
5602 "\t same name, and trigger hits will update this common data.\n\n"
5603 "\t Reading the 'hist' file for the event will dump the hash\n"
5604 "\t table in its entirety to stdout. If there are multiple hist\n"
5605 "\t triggers attached to an event, there will be a table for each\n"
5606 "\t trigger in the output. The table displayed for a named\n"
5607 "\t trigger will be the same as any other instance having the\n"
5608 "\t same name. The default format used to display a given field\n"
5609 "\t can be modified by appending any of the following modifiers\n"
5610 "\t to the field name, as applicable:\n\n"
5611 "\t .hex display a number as a hex value\n"
5612 "\t .sym display an address as a symbol\n"
5613 "\t .sym-offset display an address as a symbol and offset\n"
5614 "\t .execname display a common_pid as a program name\n"
5615 "\t .syscall display a syscall id as a syscall name\n"
5616 "\t .log2 display log2 value rather than raw number\n"
5617 "\t .usecs display a common_timestamp in microseconds\n\n"
5618 "\t The 'pause' parameter can be used to pause an existing hist\n"
5619 "\t trigger or to start a hist trigger but not log any events\n"
5620 "\t until told to do so. 'continue' can be used to start or\n"
5621 "\t restart a paused hist trigger.\n\n"
5622 "\t The 'clear' parameter will clear the contents of a running\n"
5623 "\t hist trigger and leave its current paused/active state\n"
5625 "\t The enable_hist and disable_hist triggers can be used to\n"
5626 "\t have one event conditionally start and stop another event's\n"
5627 "\t already-attached hist trigger. The syntax is analogous to\n"
5628 "\t the enable_event and disable_event triggers.\n\n"
5629 "\t Hist trigger handlers and actions are executed whenever a\n"
5630 "\t a histogram entry is added or updated. They take the form:\n\n"
5631 "\t <handler>.<action>\n\n"
5632 "\t The available handlers are:\n\n"
5633 "\t onmatch(matching.event) - invoke on addition or update\n"
5634 "\t onmax(var) - invoke if var exceeds current max\n"
5635 "\t onchange(var) - invoke action if var changes\n\n"
5636 "\t The available actions are:\n\n"
5637 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5638 "\t save(field,...) - save current event fields\n"
5639 #ifdef CONFIG_TRACER_SNAPSHOT
5640 "\t snapshot() - snapshot the trace buffer\n\n"
5642 #ifdef CONFIG_SYNTH_EVENTS
5643 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5644 "\t Write into this file to define/undefine new synthetic events.\n"
5645 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5651 tracing_readme_read(struct file *filp, char __user *ubuf,
5652 size_t cnt, loff_t *ppos)
5654 return simple_read_from_buffer(ubuf, cnt, ppos,
5655 readme_msg, strlen(readme_msg));
5658 static const struct file_operations tracing_readme_fops = {
5659 .open = tracing_open_generic,
5660 .read = tracing_readme_read,
5661 .llseek = generic_file_llseek,
5664 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5668 if (*pos || m->count)
5673 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5674 if (trace_find_tgid(*ptr))
5681 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5691 v = saved_tgids_next(m, v, &l);
5699 static void saved_tgids_stop(struct seq_file *m, void *v)
5703 static int saved_tgids_show(struct seq_file *m, void *v)
5705 int pid = (int *)v - tgid_map;
5707 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
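/*
 * Editor's note: each saved_tgids line is thus a "<pid> <tgid>" pair,
 * e.g. (hypothetical values):
 *
 *	1023 1022
 *	1024 1022
 */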
5711 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5712 .start = saved_tgids_start,
5713 .stop = saved_tgids_stop,
5714 .next = saved_tgids_next,
5715 .show = saved_tgids_show,
5718 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5722 ret = tracing_check_open_get_tr(NULL);
5726 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5730 static const struct file_operations tracing_saved_tgids_fops = {
5731 .open = tracing_saved_tgids_open,
5733 .llseek = seq_lseek,
5734 .release = seq_release,
5737 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5739 unsigned int *ptr = v;
5741 if (*pos || m->count)
5746 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5748 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5757 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5763 arch_spin_lock(&trace_cmdline_lock);
5765 v = &savedcmd->map_cmdline_to_pid[0];
5767 v = saved_cmdlines_next(m, v, &l);
5775 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5777 arch_spin_unlock(&trace_cmdline_lock);
5781 static int saved_cmdlines_show(struct seq_file *m, void *v)
5783 char buf[TASK_COMM_LEN];
5784 unsigned int *pid = v;
5786 __trace_find_cmdline(*pid, buf);
5787 seq_printf(m, "%d %s\n", *pid, buf);
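/*
 * Editor's note: each saved_cmdlines line is thus a "<pid> <comm>" pair,
 * e.g. (hypothetical values):
 *
 *	1771 bash
 *	1790 cat
 */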
5791 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5792 .start = saved_cmdlines_start,
5793 .next = saved_cmdlines_next,
5794 .stop = saved_cmdlines_stop,
5795 .show = saved_cmdlines_show,
5798 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5802 ret = tracing_check_open_get_tr(NULL);
5806 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5809 static const struct file_operations tracing_saved_cmdlines_fops = {
5810 .open = tracing_saved_cmdlines_open,
5812 .llseek = seq_lseek,
5813 .release = seq_release,
5817 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5818 size_t cnt, loff_t *ppos)
5823 arch_spin_lock(&trace_cmdline_lock);
5824 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5825 arch_spin_unlock(&trace_cmdline_lock);
5827 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5830 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5832 kfree(s->saved_cmdlines);
5833 kfree(s->map_cmdline_to_pid);
5837 static int tracing_resize_saved_cmdlines(unsigned int val)
5839 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5841 s = kmalloc(sizeof(*s), GFP_KERNEL);
5845 if (allocate_cmdlines_buffer(val, s) < 0) {
5850 arch_spin_lock(&trace_cmdline_lock);
5851 savedcmd_temp = savedcmd;
5853 arch_spin_unlock(&trace_cmdline_lock);
5854 free_saved_cmdlines_buffer(savedcmd_temp);
5860 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5861 size_t cnt, loff_t *ppos)
5866 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5870 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5871 if (!val || val > PID_MAX_DEFAULT)
5874 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5883 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5884 .open = tracing_open_generic,
5885 .read = tracing_saved_cmdlines_size_read,
5886 .write = tracing_saved_cmdlines_size_write,
5889 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5890 static union trace_eval_map_item *
5891 update_eval_map(union trace_eval_map_item *ptr)
5893 if (!ptr->map.eval_string) {
5894 if (ptr->tail.next) {
5895 ptr = ptr->tail.next;
5896 /* Set ptr to the next real item (skip head) */
5904 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5906 union trace_eval_map_item *ptr = v;
5909 * Paranoid! If ptr points to end, we don't want to increment past it.
5910 * This really should never happen.
5913 ptr = update_eval_map(ptr);
5914 if (WARN_ON_ONCE(!ptr))
5918 ptr = update_eval_map(ptr);
5923 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5925 union trace_eval_map_item *v;
5928 mutex_lock(&trace_eval_mutex);
5930 v = trace_eval_maps;
5934 while (v && l < *pos) {
5935 v = eval_map_next(m, v, &l);
5941 static void eval_map_stop(struct seq_file *m, void *v)
5943 mutex_unlock(&trace_eval_mutex);
5946 static int eval_map_show(struct seq_file *m, void *v)
5948 union trace_eval_map_item *ptr = v;
5950 seq_printf(m, "%s %ld (%s)\n",
5951 ptr->map.eval_string, ptr->map.eval_value,
5957 static const struct seq_operations tracing_eval_map_seq_ops = {
5958 .start = eval_map_start,
5959 .next = eval_map_next,
5960 .stop = eval_map_stop,
5961 .show = eval_map_show,
5964 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5968 ret = tracing_check_open_get_tr(NULL);
5972 return seq_open(filp, &tracing_eval_map_seq_ops);
5975 static const struct file_operations tracing_eval_map_fops = {
5976 .open = tracing_eval_map_open,
5978 .llseek = seq_lseek,
5979 .release = seq_release,
5982 static inline union trace_eval_map_item *
5983 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5985 /* Return tail of array given the head */
5986 return ptr + ptr->head.length + 1;
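/*
 * Editor's sketch of the layout described below: for a module contributing
 * N maps, the array is
 *
 *	[0]      head (mod, length = N)
 *	[1..N]   map entries
 *	[N + 1]  tail (pointer to the next list)
 *
 * so, given the head, the tail sits at ptr + length + 1.
 */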
5990 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5993 struct trace_eval_map **stop;
5994 struct trace_eval_map **map;
5995 union trace_eval_map_item *map_array;
5996 union trace_eval_map_item *ptr;
6001 * The trace_eval_maps contains the map plus a head and tail item,
6002 * where the head holds the module and length of array, and the
6003 * tail holds a pointer to the next list.
6005 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6007 pr_warn("Unable to allocate trace eval mapping\n");
6011 mutex_lock(&trace_eval_mutex);
6013 if (!trace_eval_maps)
6014 trace_eval_maps = map_array;
6016 ptr = trace_eval_maps;
6018 ptr = trace_eval_jmp_to_tail(ptr);
6019 if (!ptr->tail.next)
6021 ptr = ptr->tail.next;
6024 ptr->tail.next = map_array;
6026 map_array->head.mod = mod;
6027 map_array->head.length = len;
6030 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6031 map_array->map = **map;
6034 memset(map_array, 0, sizeof(*map_array));
6036 mutex_unlock(&trace_eval_mutex);
6039 static void trace_create_eval_file(struct dentry *d_tracer)
6041 trace_create_file("eval_map", 0444, d_tracer,
6042 NULL, &tracing_eval_map_fops);
6045 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6046 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6047 static inline void trace_insert_eval_map_file(struct module *mod,
6048 struct trace_eval_map **start, int len) { }
6049 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6051 static void trace_insert_eval_map(struct module *mod,
6052 struct trace_eval_map **start, int len)
6054 struct trace_eval_map **map;
6061 trace_event_eval_update(map, len);
6063 trace_insert_eval_map_file(mod, start, len);
6067 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6068 size_t cnt, loff_t *ppos)
6070 struct trace_array *tr = filp->private_data;
6071 char buf[MAX_TRACER_SIZE+2];
6074 mutex_lock(&trace_types_lock);
6075 r = sprintf(buf, "%s\n", tr->current_trace->name);
6076 mutex_unlock(&trace_types_lock);
6078 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6081 int tracer_init(struct tracer *t, struct trace_array *tr)
6083 tracing_reset_online_cpus(&tr->array_buffer);
6087 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6091 for_each_tracing_cpu(cpu)
6092 per_cpu_ptr(buf->data, cpu)->entries = val;
6095 #ifdef CONFIG_TRACER_MAX_TRACE
6096 /* resize @tr's buffer to the size of @size_tr's entries */
6097 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6098 struct array_buffer *size_buf, int cpu_id)
6102 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6103 for_each_tracing_cpu(cpu) {
6104 ret = ring_buffer_resize(trace_buf->buffer,
6105 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6108 per_cpu_ptr(trace_buf->data, cpu)->entries =
6109 per_cpu_ptr(size_buf->data, cpu)->entries;
6112 ret = ring_buffer_resize(trace_buf->buffer,
6113 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6115 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6116 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6121 #endif /* CONFIG_TRACER_MAX_TRACE */
6123 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6124 unsigned long size, int cpu)
6129 * If kernel or user changes the size of the ring buffer
6130 * we use the size that was given, and we can forget about
6131 * expanding it later.
6133 ring_buffer_expanded = true;
6135 /* May be called before buffers are initialized */
6136 if (!tr->array_buffer.buffer)
6139 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6143 #ifdef CONFIG_TRACER_MAX_TRACE
6144 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6145 !tr->current_trace->use_max_tr)
6148 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6150 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6151 &tr->array_buffer, cpu);
6154 * AARGH! We are left with different
6155 * size max buffer!!!!
6156 * The max buffer is our "snapshot" buffer.
6157 * When a tracer needs a snapshot (one of the
6158 * latency tracers), it swaps the max buffer
6159 * with the saved snapshot. We succeeded in updating
6160 * the size of the main buffer, but failed to
6161 * update the size of the max buffer. But when we tried
6162 * to reset the main buffer to the original size, we
6163 * failed there too. This is very unlikely to
6164 * happen, but if it does, warn and kill all
6168 tracing_disabled = 1;
6173 if (cpu == RING_BUFFER_ALL_CPUS)
6174 set_buffer_entries(&tr->max_buffer, size);
6176 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6179 #endif /* CONFIG_TRACER_MAX_TRACE */
6181 if (cpu == RING_BUFFER_ALL_CPUS)
6182 set_buffer_entries(&tr->array_buffer, size);
6184 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6189 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6190 unsigned long size, int cpu_id)
6194 mutex_lock(&trace_types_lock);
6196 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6197 /* make sure, this cpu is enabled in the mask */
6198 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6204 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6209 mutex_unlock(&trace_types_lock);
6216 * tracing_update_buffers - used by tracing facility to expand ring buffers
6218 * To save memory when tracing is never used on a system that has it
6219 * configured in, the ring buffers start at a minimum size. Once a
6220 * user starts to use the tracing facility, they need to grow to
6221 * their default size.
6223 * This function is to be called when a tracer is about to be used.
6225 int tracing_update_buffers(void)
6229 mutex_lock(&trace_types_lock);
6230 if (!ring_buffer_expanded)
6231 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6232 RING_BUFFER_ALL_CPUS);
6233 mutex_unlock(&trace_types_lock);
6238 struct trace_option_dentry;
6241 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6244 * Used to clear out the tracer before deletion of an instance.
6245 * Must have trace_types_lock held.
6247 static void tracing_set_nop(struct trace_array *tr)
6249 if (tr->current_trace == &nop_trace)
6252 tr->current_trace->enabled--;
6254 if (tr->current_trace->reset)
6255 tr->current_trace->reset(tr);
6257 tr->current_trace = &nop_trace;
6260 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6262 /* Only enable if the directory has been created already. */
6266 create_trace_option_files(tr, t);
6269 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6272 #ifdef CONFIG_TRACER_MAX_TRACE
6277 mutex_lock(&trace_types_lock);
6279 if (!ring_buffer_expanded) {
6280 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6281 RING_BUFFER_ALL_CPUS);
6287 for (t = trace_types; t; t = t->next) {
6288 if (strcmp(t->name, buf) == 0)
6295 if (t == tr->current_trace)
6298 #ifdef CONFIG_TRACER_SNAPSHOT
6299 if (t->use_max_tr) {
6300 arch_spin_lock(&tr->max_lock);
6301 if (tr->cond_snapshot)
6303 arch_spin_unlock(&tr->max_lock);
6308 /* Some tracers won't work on kernel command line */
6309 if (system_state < SYSTEM_RUNNING && t->noboot) {
6310 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6315 /* Some tracers are only allowed for the top level buffer */
6316 if (!trace_ok_for_array(t, tr)) {
6321 /* If trace pipe files are being read, we can't change the tracer */
6322 if (tr->trace_ref) {
6327 trace_branch_disable();
6329 tr->current_trace->enabled--;
6331 if (tr->current_trace->reset)
6332 tr->current_trace->reset(tr);
6334 /* Current trace needs to be nop_trace before synchronize_rcu */
6335 tr->current_trace = &nop_trace;
6337 #ifdef CONFIG_TRACER_MAX_TRACE
6338 had_max_tr = tr->allocated_snapshot;
6340 if (had_max_tr && !t->use_max_tr) {
6342 * We need to make sure that the update_max_tr sees that
6343 * current_trace changed to nop_trace to keep it from
6344 * swapping the buffers after we resize it.
6345 * The update_max_tr is called with interrupts disabled,
6346 * so a synchronize_rcu() is sufficient.
6353 #ifdef CONFIG_TRACER_MAX_TRACE
6354 if (t->use_max_tr && !had_max_tr) {
6355 ret = tracing_alloc_snapshot_instance(tr);
6362 ret = tracer_init(t, tr);
6367 tr->current_trace = t;
6368 tr->current_trace->enabled++;
6369 trace_branch_enable(tr);
6371 mutex_unlock(&trace_types_lock);
6377 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6378 size_t cnt, loff_t *ppos)
6380 struct trace_array *tr = filp->private_data;
6381 char buf[MAX_TRACER_SIZE+1];
6388 if (cnt > MAX_TRACER_SIZE)
6389 cnt = MAX_TRACER_SIZE;
6391 if (copy_from_user(buf, ubuf, cnt))
6396 /* strip trailing whitespace */
6397 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6400 err = tracing_set_tracer(tr, buf);
6410 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6411 size_t cnt, loff_t *ppos)
6416 r = snprintf(buf, sizeof(buf), "%ld\n",
6417 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6418 if (r > sizeof(buf))
6420 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
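/*
 * Editor's note: the stored value is in nanoseconds but reported in
 * microseconds, so *ptr == 1500000 reads back as "1500\n"; the sentinel
 * (unsigned long)-1 is reported verbatim as "-1\n".
 */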
6424 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6425 size_t cnt, loff_t *ppos)
6430 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6440 tracing_thresh_read(struct file *filp, char __user *ubuf,
6441 size_t cnt, loff_t *ppos)
6443 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6447 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6448 size_t cnt, loff_t *ppos)
6450 struct trace_array *tr = filp->private_data;
6453 mutex_lock(&trace_types_lock);
6454 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6458 if (tr->current_trace->update_thresh) {
6459 ret = tr->current_trace->update_thresh(tr);
6466 mutex_unlock(&trace_types_lock);
6471 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6474 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6475 size_t cnt, loff_t *ppos)
6477 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6481 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6482 size_t cnt, loff_t *ppos)
6484 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6489 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6491 struct trace_array *tr = inode->i_private;
6492 struct trace_iterator *iter;
6495 ret = tracing_check_open_get_tr(tr);
6499 mutex_lock(&trace_types_lock);
6501 /* create a buffer to store the information to pass to userspace */
6502 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6505 __trace_array_put(tr);
6509 trace_seq_init(&iter->seq);
6510 iter->trace = tr->current_trace;
6512 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6517 /* trace pipe does not show start of buffer */
6518 cpumask_setall(iter->started);
6520 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6521 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6523 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6524 if (trace_clocks[tr->clock_id].in_ns)
6525 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6528 iter->array_buffer = &tr->array_buffer;
6529 iter->cpu_file = tracing_get_cpu(inode);
6530 mutex_init(&iter->mutex);
6531 filp->private_data = iter;
6533 if (iter->trace->pipe_open)
6534 iter->trace->pipe_open(iter);
6536 nonseekable_open(inode, filp);
6540 mutex_unlock(&trace_types_lock);
6545 __trace_array_put(tr);
6546 mutex_unlock(&trace_types_lock);
6550 static int tracing_release_pipe(struct inode *inode, struct file *file)
6552 struct trace_iterator *iter = file->private_data;
6553 struct trace_array *tr = inode->i_private;
6555 mutex_lock(&trace_types_lock);
6559 if (iter->trace->pipe_close)
6560 iter->trace->pipe_close(iter);
6562 mutex_unlock(&trace_types_lock);
6564 free_cpumask_var(iter->started);
6565 mutex_destroy(&iter->mutex);
6568 trace_array_put(tr);
6574 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6576 struct trace_array *tr = iter->tr;
6578 /* Iterators are static, they should be filled or empty */
6579 if (trace_buffer_iter(iter, iter->cpu_file))
6580 return EPOLLIN | EPOLLRDNORM;
6582 if (tr->trace_flags & TRACE_ITER_BLOCK)
6584 * Always select as readable when in blocking mode
6586 return EPOLLIN | EPOLLRDNORM;
6588 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6593 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6595 struct trace_iterator *iter = filp->private_data;
6597 return trace_poll(iter, filp, poll_table);
6600 /* Must be called with iter->mutex held. */
6601 static int tracing_wait_pipe(struct file *filp)
6603 struct trace_iterator *iter = filp->private_data;
6606 while (trace_empty(iter)) {
6608 if ((filp->f_flags & O_NONBLOCK)) {
6613 * Only give an EOF once we have read something while tracing
6614 * has been disabled. If tracing is disabled but nothing has been
6615 * read yet, keep blocking. This allows a user to cat this file,
6616 * and then enable tracing. But after we have read something,
6617 * we give an EOF when tracing is again disabled.
6619 * iter->pos will be 0 if we haven't read anything.
6621 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6624 mutex_unlock(&iter->mutex);
6626 ret = wait_on_pipe(iter, 0);
6628 mutex_lock(&iter->mutex);
6641 tracing_read_pipe(struct file *filp, char __user *ubuf,
6642 size_t cnt, loff_t *ppos)
6644 struct trace_iterator *iter = filp->private_data;
6648 * Avoid more than one consumer on a single file descriptor.
6649 * This is just a matter of trace coherency; the ring buffer itself is protected.
6652 mutex_lock(&iter->mutex);
6654 /* return any leftover data */
6655 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6659 trace_seq_init(&iter->seq);
6661 if (iter->trace->read) {
6662 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6668 sret = tracing_wait_pipe(filp);
6672 /* stop when tracing is finished */
6673 if (trace_empty(iter)) {
6678 if (cnt >= PAGE_SIZE)
6679 cnt = PAGE_SIZE - 1;
6681 /* reset all but tr, trace, and overruns */
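/*
 * (Editor's note: the memset below relies on the member order of
 * struct trace_iterator -- everything from ->seq onward is zeroed,
 * while members declared before ->seq, such as tr and trace, keep
 * their values.)
 */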
6682 memset(&iter->seq, 0,
6683 sizeof(struct trace_iterator) -
6684 offsetof(struct trace_iterator, seq));
6685 cpumask_clear(iter->started);
6686 trace_seq_init(&iter->seq);
6689 trace_event_read_lock();
6690 trace_access_lock(iter->cpu_file);
6691 while (trace_find_next_entry_inc(iter) != NULL) {
6692 enum print_line_t ret;
6693 int save_len = iter->seq.seq.len;
6695 ret = print_trace_line(iter);
6696 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6697 /* don't print partial lines */
6698 iter->seq.seq.len = save_len;
6701 if (ret != TRACE_TYPE_NO_CONSUME)
6702 trace_consume(iter);
6704 if (trace_seq_used(&iter->seq) >= cnt)
6708 * Setting the full flag means we reached the trace_seq buffer
6709 * size and should have left via the partial-output condition
6710 * above; one of the trace_seq_* functions was not used properly.
6712 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6715 trace_access_unlock(iter->cpu_file);
6716 trace_event_read_unlock();
6718 /* Now copy what we have to the user */
6719 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6720 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6721 trace_seq_init(&iter->seq);
6724 * If there was nothing to send to user, in spite of consuming trace
6725 * entries, go back to wait for more entries.
6731 mutex_unlock(&iter->mutex);
6736 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6739 __free_page(spd->pages[idx]);
6743 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6749 /* Seq buffer is page-sized, exactly what we need. */
6751 save_len = iter->seq.seq.len;
6752 ret = print_trace_line(iter);
6754 if (trace_seq_has_overflowed(&iter->seq)) {
6755 iter->seq.seq.len = save_len;
6760 * This should not be hit, because it should only
6761 * be set if the iter->seq overflowed. But check it
6762 * anyway to be safe.
6764 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6765 iter->seq.seq.len = save_len;
6769 count = trace_seq_used(&iter->seq) - save_len;
6772 iter->seq.seq.len = save_len;
6776 if (ret != TRACE_TYPE_NO_CONSUME)
6777 trace_consume(iter);
6779 if (!trace_find_next_entry_inc(iter)) {
6789 static ssize_t tracing_splice_read_pipe(struct file *filp,
6791 struct pipe_inode_info *pipe,
6795 struct page *pages_def[PIPE_DEF_BUFFERS];
6796 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6797 struct trace_iterator *iter = filp->private_data;
6798 struct splice_pipe_desc spd = {
6800 .partial = partial_def,
6801 .nr_pages = 0, /* This gets updated below. */
6802 .nr_pages_max = PIPE_DEF_BUFFERS,
6803 .ops = &default_pipe_buf_ops,
6804 .spd_release = tracing_spd_release_pipe,
6810 if (splice_grow_spd(pipe, &spd))
6813 mutex_lock(&iter->mutex);
6815 if (iter->trace->splice_read) {
6816 ret = iter->trace->splice_read(iter, filp,
6817 ppos, pipe, len, flags);
6822 ret = tracing_wait_pipe(filp);
6826 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6831 trace_event_read_lock();
6832 trace_access_lock(iter->cpu_file);
6834 /* Fill as many pages as possible. */
6835 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6836 spd.pages[i] = alloc_page(GFP_KERNEL);
6840 rem = tracing_fill_pipe_page(rem, iter);
6842 /* Copy the data into the page, so we can start over. */
6843 ret = trace_seq_to_buffer(&iter->seq,
6844 page_address(spd.pages[i]),
6845 trace_seq_used(&iter->seq));
6847 __free_page(spd.pages[i]);
6850 spd.partial[i].offset = 0;
6851 spd.partial[i].len = trace_seq_used(&iter->seq);
6853 trace_seq_init(&iter->seq);
6856 trace_access_unlock(iter->cpu_file);
6857 trace_event_read_unlock();
6858 mutex_unlock(&iter->mutex);
6863 ret = splice_to_pipe(pipe, &spd);
6867 splice_shrink_spd(&spd);
6871 mutex_unlock(&iter->mutex);
6876 tracing_entries_read(struct file *filp, char __user *ubuf,
6877 size_t cnt, loff_t *ppos)
6879 struct inode *inode = file_inode(filp);
6880 struct trace_array *tr = inode->i_private;
6881 int cpu = tracing_get_cpu(inode);
6886 mutex_lock(&trace_types_lock);
6888 if (cpu == RING_BUFFER_ALL_CPUS) {
6889 int cpu, buf_size_same;
6894 /* check if all cpu sizes are same */
6895 for_each_tracing_cpu(cpu) {
6896 /* fill in the size from first enabled cpu */
6898 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6899 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6905 if (buf_size_same) {
6906 if (!ring_buffer_expanded)
6907 r = sprintf(buf, "%lu (expanded: %lu)\n",
6909 trace_buf_size >> 10);
6911 r = sprintf(buf, "%lu\n", size >> 10);
6913 r = sprintf(buf, "X\n");
6915 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6917 mutex_unlock(&trace_types_lock);
6919 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6924 tracing_entries_write(struct file *filp, const char __user *ubuf,
6925 size_t cnt, loff_t *ppos)
6927 struct inode *inode = file_inode(filp);
6928 struct trace_array *tr = inode->i_private;
6932 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6936 /* must have at least 1 entry */
6940 /* value is in KB */
6941 val <<= 10;
6942 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
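/*
 * Example (editor's sketch): buffer_size_kb takes kilobytes, so
 *
 *	# echo 1024 > buffer_size_kb
 *
 * arrives here as val = 1024, becomes 1048576 bytes after the KB
 * conversion above, and resizes either one CPU's buffer (when written
 * through per_cpu/cpuN/buffer_size_kb) or all of them.
 */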
6952 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6953 size_t cnt, loff_t *ppos)
6955 struct trace_array *tr = filp->private_data;
6958 unsigned long size = 0, expanded_size = 0;
6960 mutex_lock(&trace_types_lock);
6961 for_each_tracing_cpu(cpu) {
6962 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6963 if (!ring_buffer_expanded)
6964 expanded_size += trace_buf_size >> 10;
6966 if (ring_buffer_expanded)
6967 r = sprintf(buf, "%lu\n", size);
6969 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6970 mutex_unlock(&trace_types_lock);
6972 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6976 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6977 size_t cnt, loff_t *ppos)
6980 * There is no need to read what the user has written; this function
6981 * just makes sure that there is no error when "echo" is used.
6990 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6992 struct trace_array *tr = inode->i_private;
6994 /* disable tracing ? */
6995 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6996 tracer_tracing_off(tr);
6997 /* resize the ring buffer to 0 */
6998 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7000 trace_array_put(tr);
7006 tracing_mark_write(struct file *filp, const char __user *ubuf,
7007 size_t cnt, loff_t *fpos)
7009 struct trace_array *tr = filp->private_data;
7010 struct ring_buffer_event *event;
7011 enum event_trigger_type tt = ETT_NONE;
7012 struct trace_buffer *buffer;
7013 struct print_entry *entry;
7018 /* Used in tracing_mark_raw_write() as well */
7019 #define FAULTED_STR "<faulted>"
7020 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7022 if (tracing_disabled)
7025 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7028 if (cnt > TRACE_BUF_SIZE)
7029 cnt = TRACE_BUF_SIZE;
7031 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7033 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7035 /* If cnt is smaller than "<faulted>", make sure we can still add that */
7036 if (cnt < FAULTED_SIZE)
7037 size += FAULTED_SIZE - cnt;
7039 buffer = tr->array_buffer.buffer;
7040 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7042 if (unlikely(!event))
7043 /* Ring buffer disabled, return as if not open for write */
7046 entry = ring_buffer_event_data(event);
7047 entry->ip = _THIS_IP_;
7049 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7051 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7057 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7058 /* do not add \n before testing triggers, but add \0 */
7059 entry->buf[cnt] = '\0';
7060 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7063 if (entry->buf[cnt - 1] != '\n') {
7064 entry->buf[cnt] = '\n';
7065 entry->buf[cnt + 1] = '\0';
7067 entry->buf[cnt] = '\0';
7069 if (static_branch_unlikely(&trace_marker_exports_enabled))
7070 ftrace_exports(event, TRACE_EXPORT_MARKER);
7071 __buffer_unlock_commit(buffer, event);
7074 event_triggers_post_call(tr->trace_marker_file, tt);
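/*
 * Usage sketch (editor's addition): writing to trace_marker drops a
 * TRACE_PRINT entry into the buffer, e.g.
 *
 *	# echo hello > trace_marker
 *
 * typically renders in the trace as "tracing_mark_write: hello"; a
 * trailing newline is appended above if the write did not include one.
 */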
7082 /* Limit it for now to 3K (including tag) */
7083 #define RAW_DATA_MAX_SIZE (1024*3)
7086 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7087 size_t cnt, loff_t *fpos)
7089 struct trace_array *tr = filp->private_data;
7090 struct ring_buffer_event *event;
7091 struct trace_buffer *buffer;
7092 struct raw_data_entry *entry;
7097 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7099 if (tracing_disabled)
7102 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7105 /* The marker must at least have a tag id */
7106 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7109 if (cnt > TRACE_BUF_SIZE)
7110 cnt = TRACE_BUF_SIZE;
7112 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7114 size = sizeof(*entry) + cnt;
7115 if (cnt < FAULT_SIZE_ID)
7116 size += FAULT_SIZE_ID - cnt;
7118 buffer = tr->array_buffer.buffer;
7119 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7122 /* Ring buffer disabled, return as if not open for write */
7125 entry = ring_buffer_event_data(event);
7127 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7130 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7135 __buffer_unlock_commit(buffer, event);
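/*
 * Usage sketch (editor's addition): the raw marker expects at least a
 * 4-byte tag id at the start of the payload, e.g. from user space:
 *
 *	unsigned int buf[2] = { 0x1234, 42 };	// tag id + payload
 *	write(fd, buf, sizeof(buf));		// fd open on trace_marker_raw
 */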
7143 static int tracing_clock_show(struct seq_file *m, void *v)
7145 struct trace_array *tr = m->private;
7148 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7150 "%s%s%s%s", i ? " " : "",
7151 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7152 i == tr->clock_id ? "]" : "");
7158 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7162 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7163 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7166 if (i == ARRAY_SIZE(trace_clocks))
7169 mutex_lock(&trace_types_lock);
7173 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7176 * The new clock may not be consistent with the previous clock.
7177 * Reset the buffer so that it doesn't have incomparable timestamps.
7179 tracing_reset_online_cpus(&tr->array_buffer);
7181 #ifdef CONFIG_TRACER_MAX_TRACE
7182 if (tr->max_buffer.buffer)
7183 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7184 tracing_reset_online_cpus(&tr->max_buffer);
7187 mutex_unlock(&trace_types_lock);
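/*
 * Usage sketch (editor's addition): clock switching goes through the
 * trace_clock file, e.g.
 *
 *	# echo global > trace_clock
 *
 * The buffers are reset above because timestamps taken with the old
 * clock are not comparable with the new one.
 */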
7192 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7193 size_t cnt, loff_t *fpos)
7195 struct seq_file *m = filp->private_data;
7196 struct trace_array *tr = m->private;
7198 const char *clockstr;
7201 if (cnt >= sizeof(buf))
7204 if (copy_from_user(buf, ubuf, cnt))
7209 clockstr = strstrip(buf);
7211 ret = tracing_set_clock(tr, clockstr);
7220 static int tracing_clock_open(struct inode *inode, struct file *file)
7222 struct trace_array *tr = inode->i_private;
7225 ret = tracing_check_open_get_tr(tr);
7229 ret = single_open(file, tracing_clock_show, inode->i_private);
7231 trace_array_put(tr);
7236 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7238 struct trace_array *tr = m->private;
7240 mutex_lock(&trace_types_lock);
7242 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7243 seq_puts(m, "delta [absolute]\n");
7245 seq_puts(m, "[delta] absolute\n");
7247 mutex_unlock(&trace_types_lock);
7252 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7254 struct trace_array *tr = inode->i_private;
7257 ret = tracing_check_open_get_tr(tr);
7261 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7263 trace_array_put(tr);
7268 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7270 if (rbe == this_cpu_read(trace_buffered_event))
7271 return ring_buffer_time_stamp(buffer);
7273 return ring_buffer_event_time_stamp(buffer, rbe);
7277 * Set or disable using the per-CPU trace_buffered_event when possible.
7279 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7283 mutex_lock(&trace_types_lock);
7285 if (set && tr->no_filter_buffering_ref++)
7289 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7294 --tr->no_filter_buffering_ref;
7297 mutex_unlock(&trace_types_lock);
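/*
 * (Editor's note: this is reference counted -- nested calls with
 * set == true are balanced by an equal number of set == false calls,
 * and only the first set and the final matching clear actually change
 * the underlying state.)
 */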
7302 struct ftrace_buffer_info {
7303 struct trace_iterator iter;
7305 unsigned int spare_cpu;
7309 #ifdef CONFIG_TRACER_SNAPSHOT
7310 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7312 struct trace_array *tr = inode->i_private;
7313 struct trace_iterator *iter;
7317 ret = tracing_check_open_get_tr(tr);
7321 if (file->f_mode & FMODE_READ) {
7322 iter = __tracing_open(inode, file, true);
7324 ret = PTR_ERR(iter);
7326 /* Writes still need the seq_file to hold the private data */
7328 m = kzalloc(sizeof(*m), GFP_KERNEL);
7331 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7339 iter->array_buffer = &tr->max_buffer;
7340 iter->cpu_file = tracing_get_cpu(inode);
7342 file->private_data = m;
7346 trace_array_put(tr);
7352 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7355 struct seq_file *m = filp->private_data;
7356 struct trace_iterator *iter = m->private;
7357 struct trace_array *tr = iter->tr;
7361 ret = tracing_update_buffers();
7365 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7369 mutex_lock(&trace_types_lock);
7371 if (tr->current_trace->use_max_tr) {
7376 arch_spin_lock(&tr->max_lock);
7377 if (tr->cond_snapshot)
7379 arch_spin_unlock(&tr->max_lock);
7385 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7389 if (tr->allocated_snapshot)
7393 /* Only allow per-cpu swap if the ring buffer supports it */
7394 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7395 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7400 if (tr->allocated_snapshot)
7401 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7402 &tr->array_buffer, iter->cpu_file);
7404 ret = tracing_alloc_snapshot_instance(tr);
7407 local_irq_disable();
7408 /* Now, we're going to swap */
7409 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7410 update_max_tr(tr, current, smp_processor_id(), NULL);
7412 update_max_tr_single(tr, current, iter->cpu_file);
7416 if (tr->allocated_snapshot) {
7417 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7418 tracing_reset_online_cpus(&tr->max_buffer);
7420 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7430 mutex_unlock(&trace_types_lock);
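/*
 * User-space view of the values handled above (see
 * Documentation/trace/ftrace.rst): writing 0 to the snapshot file
 * frees the snapshot buffer, 1 takes a snapshot (allocating the
 * buffer if needed), and 2 clears the snapshot contents without
 * freeing the buffer.
 */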
7434 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7436 struct seq_file *m = file->private_data;
7439 ret = tracing_release(inode, file);
7441 if (file->f_mode & FMODE_READ)
7444 /* If write only, the seq_file is just a stub */
7452 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7453 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7454 size_t count, loff_t *ppos);
7455 static int tracing_buffers_release(struct inode *inode, struct file *file);
7456 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7457 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7459 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7461 struct ftrace_buffer_info *info;
7464 /* The following checks for tracefs lockdown */
7465 ret = tracing_buffers_open(inode, filp);
7469 info = filp->private_data;
7471 if (info->iter.trace->use_max_tr) {
7472 tracing_buffers_release(inode, filp);
7476 info->iter.snapshot = true;
7477 info->iter.array_buffer = &info->iter.tr->max_buffer;
7482 #endif /* CONFIG_TRACER_SNAPSHOT */
7485 static const struct file_operations tracing_thresh_fops = {
7486 .open = tracing_open_generic,
7487 .read = tracing_thresh_read,
7488 .write = tracing_thresh_write,
7489 .llseek = generic_file_llseek,
7492 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7493 static const struct file_operations tracing_max_lat_fops = {
7494 .open = tracing_open_generic,
7495 .read = tracing_max_lat_read,
7496 .write = tracing_max_lat_write,
7497 .llseek = generic_file_llseek,
7501 static const struct file_operations set_tracer_fops = {
7502 .open = tracing_open_generic,
7503 .read = tracing_set_trace_read,
7504 .write = tracing_set_trace_write,
7505 .llseek = generic_file_llseek,
7508 static const struct file_operations tracing_pipe_fops = {
7509 .open = tracing_open_pipe,
7510 .poll = tracing_poll_pipe,
7511 .read = tracing_read_pipe,
7512 .splice_read = tracing_splice_read_pipe,
7513 .release = tracing_release_pipe,
7514 .llseek = no_llseek,
7517 static const struct file_operations tracing_entries_fops = {
7518 .open = tracing_open_generic_tr,
7519 .read = tracing_entries_read,
7520 .write = tracing_entries_write,
7521 .llseek = generic_file_llseek,
7522 .release = tracing_release_generic_tr,
7525 static const struct file_operations tracing_total_entries_fops = {
7526 .open = tracing_open_generic_tr,
7527 .read = tracing_total_entries_read,
7528 .llseek = generic_file_llseek,
7529 .release = tracing_release_generic_tr,
7532 static const struct file_operations tracing_free_buffer_fops = {
7533 .open = tracing_open_generic_tr,
7534 .write = tracing_free_buffer_write,
7535 .release = tracing_free_buffer_release,
7538 static const struct file_operations tracing_mark_fops = {
7539 .open = tracing_open_generic_tr,
7540 .write = tracing_mark_write,
7541 .llseek = generic_file_llseek,
7542 .release = tracing_release_generic_tr,
7545 static const struct file_operations tracing_mark_raw_fops = {
7546 .open = tracing_open_generic_tr,
7547 .write = tracing_mark_raw_write,
7548 .llseek = generic_file_llseek,
7549 .release = tracing_release_generic_tr,
7552 static const struct file_operations trace_clock_fops = {
7553 .open = tracing_clock_open,
7555 .llseek = seq_lseek,
7556 .release = tracing_single_release_tr,
7557 .write = tracing_clock_write,
7560 static const struct file_operations trace_time_stamp_mode_fops = {
7561 .open = tracing_time_stamp_mode_open,
7563 .llseek = seq_lseek,
7564 .release = tracing_single_release_tr,
7567 #ifdef CONFIG_TRACER_SNAPSHOT
7568 static const struct file_operations snapshot_fops = {
7569 .open = tracing_snapshot_open,
7571 .write = tracing_snapshot_write,
7572 .llseek = tracing_lseek,
7573 .release = tracing_snapshot_release,
7576 static const struct file_operations snapshot_raw_fops = {
7577 .open = snapshot_raw_open,
7578 .read = tracing_buffers_read,
7579 .release = tracing_buffers_release,
7580 .splice_read = tracing_buffers_splice_read,
7581 .llseek = no_llseek,
7584 #endif /* CONFIG_TRACER_SNAPSHOT */
7586 #define TRACING_LOG_ERRS_MAX 8
7587 #define TRACING_LOG_LOC_MAX 128
7589 #define CMD_PREFIX " Command: "
7592 const char **errs; /* ptr to loc-specific array of err strings */
7593 u8 type; /* index into errs -> specific err string */
7594 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7598 struct tracing_log_err {
7599 struct list_head list;
7600 struct err_info info;
7601 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7602 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7605 static DEFINE_MUTEX(tracing_err_log_lock);
7607 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7609 struct tracing_log_err *err;
7611 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7612 err = kzalloc(sizeof(*err), GFP_KERNEL);
7614 err = ERR_PTR(-ENOMEM);
7615 tr->n_err_log_entries++;
7620 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7621 list_del(&err->list);
7627 * err_pos - find the position of a string within a command, for caret placement
7628 * @cmd: The tracing command that caused the error
7629 * @str: The string to position the caret at within @cmd
7631 * Finds the position of the first occurrence of @str within @cmd. The
7632 * return value can be passed to tracing_log_err() for caret placement
7635 * Returns the index within @cmd of the first occurrence of @str or 0
7636 * if @str was not found.
7638 unsigned int err_pos(char *cmd, const char *str)
7642 if (WARN_ON(!strlen(cmd)))
7645 found = strstr(cmd, str);
7653 * tracing_log_err - write an error to the tracing error log
7654 * @tr: The associated trace array for the error (NULL for top level array)
7655 * @loc: A string describing where the error occurred
7656 * @cmd: The tracing command that caused the error
7657 * @errs: The array of loc-specific static error strings
7658 * @type: The index into errs[], which produces the specific static err string
7659 * @pos: The position the caret should be placed in the cmd
7661 * Writes an error into tracing/error_log of the form:
7663 * <loc>: error: <text>
7667 * tracing/error_log is a small log file containing the last
7668 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7669 * unless there has been a tracing error, and the error log can be
7670 * cleared and have its memory freed by writing the empty string in
7671 * truncation mode to it, i.e. echo > tracing/error_log.
7673 * NOTE: the @errs array along with the @type param are used to
7674 * produce a static error string - this string is not copied and saved
7675 * when the error is logged - only a pointer to it is saved. See
7676 * existing callers for examples of how static strings are typically
7677 * defined for use with tracing_log_err().
7679 void tracing_log_err(struct trace_array *tr,
7680 const char *loc, const char *cmd,
7681 const char **errs, u8 type, u8 pos)
7683 struct tracing_log_err *err;
7688 mutex_lock(&tracing_err_log_lock);
7689 err = get_tracing_log_err(tr);
7690 if (PTR_ERR(err) == -ENOMEM) {
7691 mutex_unlock(&tracing_err_log_lock);
7695 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7696 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7698 err->info.errs = errs;
7699 err->info.type = type;
7700 err->info.pos = pos;
7701 err->info.ts = local_clock();
7703 list_add_tail(&err->list, &tr->err_log);
7704 mutex_unlock(&tracing_err_log_lock);
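/*
 * Illustrative caller sketch (the names below are hypothetical, not
 * existing kernel symbols): report a bad token in @cmd with the
 * caret positioned by err_pos():
 *
 *	static const char *my_errs[] = { "bad token", "missing value" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd,
 *			my_errs, 0, err_pos(cmd, "badtoken"));
 */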
7707 static void clear_tracing_err_log(struct trace_array *tr)
7709 struct tracing_log_err *err, *next;
7711 mutex_lock(&tracing_err_log_lock);
7712 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7713 list_del(&err->list);
7717 tr->n_err_log_entries = 0;
7718 mutex_unlock(&tracing_err_log_lock);
7721 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7723 struct trace_array *tr = m->private;
7725 mutex_lock(&tracing_err_log_lock);
7727 return seq_list_start(&tr->err_log, *pos);
7730 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7732 struct trace_array *tr = m->private;
7734 return seq_list_next(v, &tr->err_log, pos);
7737 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7739 mutex_unlock(&tracing_err_log_lock);
7742 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7746 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7748 for (i = 0; i < pos; i++)
7753 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7755 struct tracing_log_err *err = v;
7758 const char *err_text = err->info.errs[err->info.type];
7759 u64 sec = err->info.ts;
7762 nsec = do_div(sec, NSEC_PER_SEC);
7763 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7764 err->loc, err_text);
7765 seq_printf(m, "%s", err->cmd);
7766 tracing_err_log_show_pos(m, err->info.pos);
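/*
 * Put together, each entry renders roughly like this (illustrative
 * values):
 *
 *	[ 1234.567890] hist:sched:sched_switch: error: bad token
 *	  Command: keys=next_pid if badtoken
 *	                            ^
 */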
7772 static const struct seq_operations tracing_err_log_seq_ops = {
7773 .start = tracing_err_log_seq_start,
7774 .next = tracing_err_log_seq_next,
7775 .stop = tracing_err_log_seq_stop,
7776 .show = tracing_err_log_seq_show
7779 static int tracing_err_log_open(struct inode *inode, struct file *file)
7781 struct trace_array *tr = inode->i_private;
7784 ret = tracing_check_open_get_tr(tr);
7788 /* If this file was opened for write, then erase contents */
7789 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7790 clear_tracing_err_log(tr);
7792 if (file->f_mode & FMODE_READ) {
7793 ret = seq_open(file, &tracing_err_log_seq_ops);
7795 struct seq_file *m = file->private_data;
7798 trace_array_put(tr);
7804 static ssize_t tracing_err_log_write(struct file *file,
7805 const char __user *buffer,
7806 size_t count, loff_t *ppos)
7811 static int tracing_err_log_release(struct inode *inode, struct file *file)
7813 struct trace_array *tr = inode->i_private;
7815 trace_array_put(tr);
7817 if (file->f_mode & FMODE_READ)
7818 seq_release(inode, file);
7823 static const struct file_operations tracing_err_log_fops = {
7824 .open = tracing_err_log_open,
7825 .write = tracing_err_log_write,
7827 .llseek = seq_lseek,
7828 .release = tracing_err_log_release,
7831 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7833 struct trace_array *tr = inode->i_private;
7834 struct ftrace_buffer_info *info;
7837 ret = tracing_check_open_get_tr(tr);
7841 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7843 trace_array_put(tr);
7847 mutex_lock(&trace_types_lock);
7850 info->iter.cpu_file = tracing_get_cpu(inode);
7851 info->iter.trace = tr->current_trace;
7852 info->iter.array_buffer = &tr->array_buffer;
7854 /* Force reading ring buffer for first read */
7855 info->read = (unsigned int)-1;
7857 filp->private_data = info;
7861 mutex_unlock(&trace_types_lock);
7863 ret = nonseekable_open(inode, filp);
7865 trace_array_put(tr);
7871 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7873 struct ftrace_buffer_info *info = filp->private_data;
7874 struct trace_iterator *iter = &info->iter;
7876 return trace_poll(iter, filp, poll_table);
7880 tracing_buffers_read(struct file *filp, char __user *ubuf,
7881 size_t count, loff_t *ppos)
7883 struct ftrace_buffer_info *info = filp->private_data;
7884 struct trace_iterator *iter = &info->iter;
7891 #ifdef CONFIG_TRACER_MAX_TRACE
7892 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7897 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7899 if (IS_ERR(info->spare)) {
7900 ret = PTR_ERR(info->spare);
7903 info->spare_cpu = iter->cpu_file;
7909 /* Do we have previous read data to read? */
7910 if (info->read < PAGE_SIZE)
7914 trace_access_lock(iter->cpu_file);
7915 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7919 trace_access_unlock(iter->cpu_file);
7922 if (trace_empty(iter)) {
7923 if ((filp->f_flags & O_NONBLOCK))
7926 ret = wait_on_pipe(iter, 0);
7937 size = PAGE_SIZE - info->read;
7941 ret = copy_to_user(ubuf, info->spare + info->read, size);
7953 static int tracing_buffers_release(struct inode *inode, struct file *file)
7955 struct ftrace_buffer_info *info = file->private_data;
7956 struct trace_iterator *iter = &info->iter;
7958 mutex_lock(&trace_types_lock);
7960 iter->tr->trace_ref--;
7962 __trace_array_put(iter->tr);
7965 ring_buffer_free_read_page(iter->array_buffer->buffer,
7966 info->spare_cpu, info->spare);
7969 mutex_unlock(&trace_types_lock);
7975 struct trace_buffer *buffer;
7978 refcount_t refcount;
7981 static void buffer_ref_release(struct buffer_ref *ref)
7983 if (!refcount_dec_and_test(&ref->refcount))
7985 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7989 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7990 struct pipe_buffer *buf)
7992 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7994 buffer_ref_release(ref);
7998 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7999 struct pipe_buffer *buf)
8001 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8003 if (refcount_read(&ref->refcount) > INT_MAX/2)
8006 refcount_inc(&ref->refcount);
8010 /* Pipe buffer operations for a buffer. */
8011 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8012 .release = buffer_pipe_buf_release,
8013 .get = buffer_pipe_buf_get,
8017 * Callback from splice_to_pipe(): release any pages left in the spd
8018 * if we errored out while filling the pipe.
8020 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8022 struct buffer_ref *ref =
8023 (struct buffer_ref *)spd->partial[i].private;
8025 buffer_ref_release(ref);
8026 spd->partial[i].private = 0;
8030 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8031 struct pipe_inode_info *pipe, size_t len,
8034 struct ftrace_buffer_info *info = file->private_data;
8035 struct trace_iterator *iter = &info->iter;
8036 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8037 struct page *pages_def[PIPE_DEF_BUFFERS];
8038 struct splice_pipe_desc spd = {
8040 .partial = partial_def,
8041 .nr_pages_max = PIPE_DEF_BUFFERS,
8042 .ops = &buffer_pipe_buf_ops,
8043 .spd_release = buffer_spd_release,
8045 struct buffer_ref *ref;
8049 #ifdef CONFIG_TRACER_MAX_TRACE
8050 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8054 if (*ppos & (PAGE_SIZE - 1))
8057 if (len & (PAGE_SIZE - 1)) {
8058 if (len < PAGE_SIZE)
8063 if (splice_grow_spd(pipe, &spd))
8067 trace_access_lock(iter->cpu_file);
8068 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8070 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8074 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8080 refcount_set(&ref->refcount, 1);
8081 ref->buffer = iter->array_buffer->buffer;
8082 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8083 if (IS_ERR(ref->page)) {
8084 ret = PTR_ERR(ref->page);
8089 ref->cpu = iter->cpu_file;
8091 r = ring_buffer_read_page(ref->buffer, &ref->page,
8092 len, iter->cpu_file, 1);
8094 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8100 page = virt_to_page(ref->page);
8102 spd.pages[i] = page;
8103 spd.partial[i].len = PAGE_SIZE;
8104 spd.partial[i].offset = 0;
8105 spd.partial[i].private = (unsigned long)ref;
8109 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8112 trace_access_unlock(iter->cpu_file);
8115 /* did we read anything? */
8116 if (!spd.nr_pages) {
8121 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8124 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8131 ret = splice_to_pipe(pipe, &spd);
8133 splice_shrink_spd(&spd);
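/*
 * Example consumer (user-space sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing): splice whole pages of binary ring-buffer data
 * from a per-cpu trace_pipe_raw file into a regular file without
 * copying through user memory:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	int out = open("trace.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int p[2];
 *	ssize_t n;
 *
 *	pipe(p);
 *	while ((n = splice(in, NULL, p[1], NULL, 8 * 4096, 0)) > 0)
 *		splice(p[0], NULL, out, NULL, n, 0);
 *
 * Note that both *ppos and len must be page aligned, as enforced above.
 */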
8138 static const struct file_operations tracing_buffers_fops = {
8139 .open = tracing_buffers_open,
8140 .read = tracing_buffers_read,
8141 .poll = tracing_buffers_poll,
8142 .release = tracing_buffers_release,
8143 .splice_read = tracing_buffers_splice_read,
8144 .llseek = no_llseek,
8148 tracing_stats_read(struct file *filp, char __user *ubuf,
8149 size_t count, loff_t *ppos)
8151 struct inode *inode = file_inode(filp);
8152 struct trace_array *tr = inode->i_private;
8153 struct array_buffer *trace_buf = &tr->array_buffer;
8154 int cpu = tracing_get_cpu(inode);
8155 struct trace_seq *s;
8157 unsigned long long t;
8158 unsigned long usec_rem;
8160 s = kmalloc(sizeof(*s), GFP_KERNEL);
8166 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8167 trace_seq_printf(s, "entries: %ld\n", cnt);
8169 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8170 trace_seq_printf(s, "overrun: %ld\n", cnt);
8172 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8173 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8175 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8176 trace_seq_printf(s, "bytes: %ld\n", cnt);
8178 if (trace_clocks[tr->clock_id].in_ns) {
8179 /* local or global for trace_clock */
8180 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8181 usec_rem = do_div(t, USEC_PER_SEC);
8182 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8185 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8186 usec_rem = do_div(t, USEC_PER_SEC);
8187 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8189 /* counter or tsc mode for trace_clock */
8190 trace_seq_printf(s, "oldest event ts: %llu\n",
8191 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8193 trace_seq_printf(s, "now ts: %llu\n",
8194 ring_buffer_time_stamp(trace_buf->buffer));
8197 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8198 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8200 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8201 trace_seq_printf(s, "read events: %ld\n", cnt);
8203 count = simple_read_from_buffer(ubuf, count, ppos,
8204 s->buffer, trace_seq_used(s));
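/*
 * Example output of reading per_cpu/cpuN/stats (illustrative values,
 * with a nanosecond clock):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 45056
 *	oldest event ts:  1234.567890
 *	now ts:  1240.123456
 *	dropped events: 0
 *	read events: 512
 */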
8211 static const struct file_operations tracing_stats_fops = {
8212 .open = tracing_open_generic_tr,
8213 .read = tracing_stats_read,
8214 .llseek = generic_file_llseek,
8215 .release = tracing_release_generic_tr,
8218 #ifdef CONFIG_DYNAMIC_FTRACE
8221 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8222 size_t cnt, loff_t *ppos)
8228 /* 256 should be plenty to hold the amount needed */
8229 buf = kmalloc(256, GFP_KERNEL);
8233 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8234 ftrace_update_tot_cnt,
8235 ftrace_number_of_pages,
8236 ftrace_number_of_groups);
8238 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8243 static const struct file_operations tracing_dyn_info_fops = {
8244 .open = tracing_open_generic,
8245 .read = tracing_read_dyn_info,
8246 .llseek = generic_file_llseek,
8248 #endif /* CONFIG_DYNAMIC_FTRACE */
8250 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8252 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8253 struct trace_array *tr, struct ftrace_probe_ops *ops,
8256 tracing_snapshot_instance(tr);
8260 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8261 struct trace_array *tr, struct ftrace_probe_ops *ops,
8264 struct ftrace_func_mapper *mapper = data;
8268 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8278 tracing_snapshot_instance(tr);
8282 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8283 struct ftrace_probe_ops *ops, void *data)
8285 struct ftrace_func_mapper *mapper = data;
8288 seq_printf(m, "%ps:", (void *)ip);
8290 seq_puts(m, "snapshot");
8293 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8296 seq_printf(m, ":count=%ld\n", *count);
8298 seq_puts(m, ":unlimited\n");
8304 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8305 unsigned long ip, void *init_data, void **data)
8307 struct ftrace_func_mapper *mapper = *data;
8310 mapper = allocate_ftrace_func_mapper();
8316 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8320 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8321 unsigned long ip, void *data)
8323 struct ftrace_func_mapper *mapper = data;
8328 free_ftrace_func_mapper(mapper, NULL);
8332 ftrace_func_mapper_remove_ip(mapper, ip);
8335 static struct ftrace_probe_ops snapshot_probe_ops = {
8336 .func = ftrace_snapshot,
8337 .print = ftrace_snapshot_print,
8340 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8341 .func = ftrace_count_snapshot,
8342 .print = ftrace_snapshot_print,
8343 .init = ftrace_snapshot_init,
8344 .free = ftrace_snapshot_free,
8348 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8349 char *glob, char *cmd, char *param, int enable)
8351 struct ftrace_probe_ops *ops;
8352 void *count = (void *)-1;
8359 /* hash funcs only work with set_ftrace_filter */
8363 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8366 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8371 number = strsep(&param, ":");
8373 if (!strlen(number))
8377 * We use the callback data field (which is a pointer) as our counter.
8380 ret = kstrtoul(number, 0, (unsigned long *)&count);
8385 ret = tracing_alloc_snapshot_instance(tr);
8389 ret = register_ftrace_function_probe(glob, tr, ops, count);
8392 return ret < 0 ? ret : 0;
8395 static struct ftrace_func_command ftrace_snapshot_cmd = {
8397 .func = ftrace_trace_snapshot_callback,
8400 static __init int register_snapshot_cmd(void)
8402 return register_ftrace_command(&ftrace_snapshot_cmd);
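/*
 * The command registered above is driven through set_ftrace_filter,
 * e.g. (assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 'schedule:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * takes a snapshot the first five times schedule() is traced; omitting
 * ":5" makes it unlimited, and a leading '!' removes the probe again.
 */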
8405 static inline __init int register_snapshot_cmd(void) { return 0; }
8406 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8408 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8410 if (WARN_ON(!tr->dir))
8411 return ERR_PTR(-ENODEV);
8413 /* Top directory uses NULL as the parent */
8414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8417 /* All sub buffers have a descriptor */
8421 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8423 struct dentry *d_tracer;
8426 return tr->percpu_dir;
8428 d_tracer = tracing_get_dentry(tr);
8429 if (IS_ERR(d_tracer))
8432 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8434 MEM_FAIL(!tr->percpu_dir,
8435 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8437 return tr->percpu_dir;
8440 static struct dentry *
8441 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8442 void *data, long cpu, const struct file_operations *fops)
8444 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8446 if (ret) /* See tracing_get_cpu() */
8447 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8452 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8454 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8455 struct dentry *d_cpu;
8456 char cpu_dir[30]; /* 30 characters should be more than enough */
8461 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8462 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8464 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8468 /* per cpu trace_pipe */
8469 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8470 tr, cpu, &tracing_pipe_fops);
8473 trace_create_cpu_file("trace", 0644, d_cpu,
8474 tr, cpu, &tracing_fops);
8476 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8477 tr, cpu, &tracing_buffers_fops);
8479 trace_create_cpu_file("stats", 0444, d_cpu,
8480 tr, cpu, &tracing_stats_fops);
8482 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8483 tr, cpu, &tracing_entries_fops);
8485 #ifdef CONFIG_TRACER_SNAPSHOT
8486 trace_create_cpu_file("snapshot", 0644, d_cpu,
8487 tr, cpu, &snapshot_fops);
8489 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8490 tr, cpu, &snapshot_raw_fops);
8494 #ifdef CONFIG_FTRACE_SELFTEST
8495 /* Let selftest have access to static functions in this file */
8496 #include "trace_selftest.c"
8500 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8503 struct trace_option_dentry *topt = filp->private_data;
8506 if (topt->flags->val & topt->opt->bit)
8511 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8515 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8518 struct trace_option_dentry *topt = filp->private_data;
8522 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8526 if (val != 0 && val != 1)
8529 if (!!(topt->flags->val & topt->opt->bit) != val) {
8530 mutex_lock(&trace_types_lock);
8531 ret = __set_tracer_option(topt->tr, topt->flags,
8533 mutex_unlock(&trace_types_lock);
8544 static const struct file_operations trace_options_fops = {
8545 .open = tracing_open_generic,
8546 .read = trace_options_read,
8547 .write = trace_options_write,
8548 .llseek = generic_file_llseek,
8552 * In order to pass in both the trace_array descriptor as well as the index
8553 * to the flag that the trace option file represents, the trace_array
8554 * has a character array of trace_flags_index[], which holds the index
8555 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8556 * The address of this character array is passed to the flag option file
8557 * read/write callbacks.
8559 * In order to extract both the index and the trace_array descriptor,
8560 * get_tr_index() uses the following algorithm: first, idx = *ptr, as
8564 * the pointer itself contains the address of the index (remember,
8567 * index[i] == i). Then, by subtracting that index from the pointer,
8568 * we get to the start of the index array itself:
8570 * ptr - idx == &index[0]
8572 * Then a simple container_of() from that pointer gets us to the
8573 * trace_array descriptor.
8575 static void get_tr_index(void *data, struct trace_array **ptr,
8576 unsigned int *pindex)
8578 *pindex = *(unsigned char *)data;
8580 *ptr = container_of(data - *pindex, struct trace_array,
8585 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8588 void *tr_index = filp->private_data;
8589 struct trace_array *tr;
8593 get_tr_index(tr_index, &tr, &index);
8595 if (tr->trace_flags & (1 << index))
8600 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8604 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8607 void *tr_index = filp->private_data;
8608 struct trace_array *tr;
8613 get_tr_index(tr_index, &tr, &index);
8615 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8619 if (val != 0 && val != 1)
8622 mutex_lock(&event_mutex);
8623 mutex_lock(&trace_types_lock);
8624 ret = set_tracer_flag(tr, 1 << index, val);
8625 mutex_unlock(&trace_types_lock);
8626 mutex_unlock(&event_mutex);
8636 static const struct file_operations trace_options_core_fops = {
8637 .open = tracing_open_generic,
8638 .read = trace_options_core_read,
8639 .write = trace_options_core_write,
8640 .llseek = generic_file_llseek,
8643 struct dentry *trace_create_file(const char *name,
8645 struct dentry *parent,
8647 const struct file_operations *fops)
8651 ret = tracefs_create_file(name, mode, parent, data, fops);
8653 pr_warn("Could not create tracefs '%s' entry\n", name);
8659 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8661 struct dentry *d_tracer;
8666 d_tracer = tracing_get_dentry(tr);
8667 if (IS_ERR(d_tracer))
8670 tr->options = tracefs_create_dir("options", d_tracer);
8672 pr_warn("Could not create tracefs directory 'options'\n");
8680 create_trace_option_file(struct trace_array *tr,
8681 struct trace_option_dentry *topt,
8682 struct tracer_flags *flags,
8683 struct tracer_opt *opt)
8685 struct dentry *t_options;
8687 t_options = trace_options_init_dentry(tr);
8691 topt->flags = flags;
8695 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8696 &trace_options_fops);
8701 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8703 struct trace_option_dentry *topts;
8704 struct trace_options *tr_topts;
8705 struct tracer_flags *flags;
8706 struct tracer_opt *opts;
8713 flags = tracer->flags;
8715 if (!flags || !flags->opts)
8719 * If this is an instance, only create flags for tracers
8720 * the instance may have.
8722 if (!trace_ok_for_array(tracer, tr))
8725 for (i = 0; i < tr->nr_topts; i++) {
8726 /* Make sure there are no duplicate flags. */
8727 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8733 for (cnt = 0; opts[cnt].name; cnt++)
8736 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8740 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8747 tr->topts = tr_topts;
8748 tr->topts[tr->nr_topts].tracer = tracer;
8749 tr->topts[tr->nr_topts].topts = topts;
8752 for (cnt = 0; opts[cnt].name; cnt++) {
8753 create_trace_option_file(tr, &topts[cnt], flags,
8755 MEM_FAIL(topts[cnt].entry == NULL,
8756 "Failed to create trace option: %s",
8761 static struct dentry *
8762 create_trace_option_core_file(struct trace_array *tr,
8763 const char *option, long index)
8765 struct dentry *t_options;
8767 t_options = trace_options_init_dentry(tr);
8771 return trace_create_file(option, 0644, t_options,
8772 (void *)&tr->trace_flags_index[index],
8773 &trace_options_core_fops);
8776 static void create_trace_options_dir(struct trace_array *tr)
8778 struct dentry *t_options;
8779 bool top_level = tr == &global_trace;
8782 t_options = trace_options_init_dentry(tr);
8786 for (i = 0; trace_options[i]; i++) {
8788 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8789 create_trace_option_core_file(tr, trace_options[i], i);
8794 rb_simple_read(struct file *filp, char __user *ubuf,
8795 size_t cnt, loff_t *ppos)
8797 struct trace_array *tr = filp->private_data;
8801 r = tracer_tracing_is_on(tr);
8802 r = sprintf(buf, "%d\n", r);
8804 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8808 rb_simple_write(struct file *filp, const char __user *ubuf,
8809 size_t cnt, loff_t *ppos)
8811 struct trace_array *tr = filp->private_data;
8812 struct trace_buffer *buffer = tr->array_buffer.buffer;
8816 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8821 mutex_lock(&trace_types_lock);
8822 if (!!val == tracer_tracing_is_on(tr)) {
8823 val = 0; /* do nothing */
8825 tracer_tracing_on(tr);
8826 if (tr->current_trace->start)
8827 tr->current_trace->start(tr);
8829 tracer_tracing_off(tr);
8830 if (tr->current_trace->stop)
8831 tr->current_trace->stop(tr);
8833 mutex_unlock(&trace_types_lock);
8841 static const struct file_operations rb_simple_fops = {
8842 .open = tracing_open_generic_tr,
8843 .read = rb_simple_read,
8844 .write = rb_simple_write,
8845 .release = tracing_release_generic_tr,
8846 .llseek = default_llseek,
8850 buffer_percent_read(struct file *filp, char __user *ubuf,
8851 size_t cnt, loff_t *ppos)
8853 struct trace_array *tr = filp->private_data;
8857 r = tr->buffer_percent;
8858 r = sprintf(buf, "%d\n", r);
8860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8864 buffer_percent_write(struct file *filp, const char __user *ubuf,
8865 size_t cnt, loff_t *ppos)
8867 struct trace_array *tr = filp->private_data;
8871 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8881 tr->buffer_percent = val;
8888 static const struct file_operations buffer_percent_fops = {
8889 .open = tracing_open_generic_tr,
8890 .read = buffer_percent_read,
8891 .write = buffer_percent_write,
8892 .release = tracing_release_generic_tr,
8893 .llseek = default_llseek,
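/*
 * buffer_percent sets how full the ring buffer must be before blocked
 * readers (see the wait_on_pipe() call in tracing_buffers_splice_read())
 * are woken: 0 wakes them on any new data, 100 only once the buffer is
 * full. E.g. "echo 0 > buffer_percent" makes trace_pipe_raw readers
 * wake as soon as anything is written.
 */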
8896 static struct dentry *trace_instance_dir;
8899 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8902 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8904 enum ring_buffer_flags rb_flags;
8906 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8910 buf->buffer = ring_buffer_alloc(size, rb_flags);
8914 buf->data = alloc_percpu(struct trace_array_cpu);
8916 ring_buffer_free(buf->buffer);
8921 /* Allocate the first page for all buffers */
8922 set_buffer_entries(&tr->array_buffer,
8923 ring_buffer_size(tr->array_buffer.buffer, 0));
8928 static int allocate_trace_buffers(struct trace_array *tr, int size)
8932 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8936 #ifdef CONFIG_TRACER_MAX_TRACE
8937 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8938 allocate_snapshot ? size : 1);
8939 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8940 ring_buffer_free(tr->array_buffer.buffer);
8941 tr->array_buffer.buffer = NULL;
8942 free_percpu(tr->array_buffer.data);
8943 tr->array_buffer.data = NULL;
8946 tr->allocated_snapshot = allocate_snapshot;
8949 * Only the top level trace array gets its snapshot allocated
8950 * from the kernel command line.
8952 allocate_snapshot = false;
8958 static void free_trace_buffer(struct array_buffer *buf)
8961 ring_buffer_free(buf->buffer);
8963 free_percpu(buf->data);
8968 static void free_trace_buffers(struct trace_array *tr)
8973 free_trace_buffer(&tr->array_buffer);
8975 #ifdef CONFIG_TRACER_MAX_TRACE
8976 free_trace_buffer(&tr->max_buffer);
8980 static void init_trace_flags_index(struct trace_array *tr)
8984 /* Used by the trace options files */
8985 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8986 tr->trace_flags_index[i] = i;
8989 static void __update_tracer_options(struct trace_array *tr)
8993 for (t = trace_types; t; t = t->next)
8994 add_tracer_options(tr, t);
8997 static void update_tracer_options(struct trace_array *tr)
8999 mutex_lock(&trace_types_lock);
9000 __update_tracer_options(tr);
9001 mutex_unlock(&trace_types_lock);
9004 /* Must have trace_types_lock held */
9005 struct trace_array *trace_array_find(const char *instance)
9007 struct trace_array *tr, *found = NULL;
9009 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9010 if (tr->name && strcmp(tr->name, instance) == 0) {
9019 struct trace_array *trace_array_find_get(const char *instance)
9021 struct trace_array *tr;
9023 mutex_lock(&trace_types_lock);
9024 tr = trace_array_find(instance);
9027 mutex_unlock(&trace_types_lock);
9032 static int trace_array_create_dir(struct trace_array *tr)
9036 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9040 ret = event_trace_add_tracer(tr->dir, tr);
9042 tracefs_remove(tr->dir);
9044 init_tracer_tracefs(tr, tr->dir);
9045 __update_tracer_options(tr);
9050 static struct trace_array *trace_array_create(const char *name)
9052 struct trace_array *tr;
9056 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9058 return ERR_PTR(ret);
9060 tr->name = kstrdup(name, GFP_KERNEL);
9064 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9067 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9069 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9071 raw_spin_lock_init(&tr->start_lock);
9073 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9075 tr->current_trace = &nop_trace;
9077 INIT_LIST_HEAD(&tr->systems);
9078 INIT_LIST_HEAD(&tr->events);
9079 INIT_LIST_HEAD(&tr->hist_vars);
9080 INIT_LIST_HEAD(&tr->err_log);
9082 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9085 if (ftrace_allocate_ftrace_ops(tr) < 0)
9088 ftrace_init_trace_array(tr);
9090 init_trace_flags_index(tr);
9092 if (trace_instance_dir) {
9093 ret = trace_array_create_dir(tr);
9097 __trace_early_add_events(tr);
9099 list_add(&tr->list, &ftrace_trace_arrays);
9106 ftrace_free_ftrace_ops(tr);
9107 free_trace_buffers(tr);
9108 free_cpumask_var(tr->tracing_cpumask);
9112 return ERR_PTR(ret);
9115 static int instance_mkdir(const char *name)
9117 struct trace_array *tr;
9120 mutex_lock(&event_mutex);
9121 mutex_lock(&trace_types_lock);
9124 if (trace_array_find(name))
9127 tr = trace_array_create(name);
9129 ret = PTR_ERR_OR_ZERO(tr);
9132 mutex_unlock(&trace_types_lock);
9133 mutex_unlock(&event_mutex);
9138 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9139 * @name: The name of the trace array to be looked up/created.
9141 * Returns a pointer to the trace array with the given name, or NULL
9142 * if it cannot be created.
9144 * NOTE: This function increments the reference counter associated with the
9145 * trace array returned. This makes sure it cannot be freed while in use.
9146 * Use trace_array_put() once the trace array is no longer needed.
9147 * If the trace_array is to be freed, trace_array_destroy() needs to
9148 * be called after the trace_array_put(), or simply let user space delete
9149 * it from the tracefs instances directory. But until the
9150 * trace_array_put() is called, user space cannot delete it.
9153 struct trace_array *trace_array_get_by_name(const char *name)
9155 struct trace_array *tr;
9157 mutex_lock(&event_mutex);
9158 mutex_lock(&trace_types_lock);
9160 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9161 if (tr->name && strcmp(tr->name, name) == 0)
9165 tr = trace_array_create(name);
9173 mutex_unlock(&trace_types_lock);
9174 mutex_unlock(&event_mutex);
9177 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
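/*
 * Typical in-kernel usage (sketch; "my_inst" is an arbitrary example
 * name):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_inst");
 *	if (!tr)
 *		return -ENOMEM;
 *	// ... use tr ...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	// only if the instance should go away
 */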
9179 static int __remove_instance(struct trace_array *tr)
9183 /* Reference counter for a newly created trace array = 1. */
9184 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9187 list_del(&tr->list);
9189 /* Disable all the flags that were enabled coming in */
9190 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9191 if ((1 << i) & ZEROED_TRACE_FLAGS)
9192 set_tracer_flag(tr, 1 << i, 0);
9195 tracing_set_nop(tr);
9196 clear_ftrace_function_probes(tr);
9197 event_trace_del_tracer(tr);
9198 ftrace_clear_pids(tr);
9199 ftrace_destroy_function_files(tr);
9200 tracefs_remove(tr->dir);
9201 free_percpu(tr->last_func_repeats);
9202 free_trace_buffers(tr);
9204 for (i = 0; i < tr->nr_topts; i++) {
9205 kfree(tr->topts[i].topts);
9209 free_cpumask_var(tr->tracing_cpumask);
9216 int trace_array_destroy(struct trace_array *this_tr)
9218 struct trace_array *tr;
9224 mutex_lock(&event_mutex);
9225 mutex_lock(&trace_types_lock);
9229 /* Make sure the trace array exists before destroying it. */
9230 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9231 if (tr == this_tr) {
9232 ret = __remove_instance(tr);
9237 mutex_unlock(&trace_types_lock);
9238 mutex_unlock(&event_mutex);
9242 EXPORT_SYMBOL_GPL(trace_array_destroy);
9244 static int instance_rmdir(const char *name)
9246 struct trace_array *tr;
9249 mutex_lock(&event_mutex);
9250 mutex_lock(&trace_types_lock);
9253 tr = trace_array_find(name);
9255 ret = __remove_instance(tr);
9257 mutex_unlock(&trace_types_lock);
9258 mutex_unlock(&event_mutex);
9263 static __init void create_trace_instances(struct dentry *d_tracer)
9265 struct trace_array *tr;
9267 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9270 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9273 mutex_lock(&event_mutex);
9274 mutex_lock(&trace_types_lock);
9276 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9279 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9280 "Failed to create instance directory\n"))
9284 mutex_unlock(&trace_types_lock);
9285 mutex_unlock(&event_mutex);
9289 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9291 struct trace_event_file *file;
9294 trace_create_file("available_tracers", 0444, d_tracer,
9295 tr, &show_traces_fops);
9297 trace_create_file("current_tracer", 0644, d_tracer,
9298 tr, &set_tracer_fops);
9300 trace_create_file("tracing_cpumask", 0644, d_tracer,
9301 tr, &tracing_cpumask_fops);
9303 trace_create_file("trace_options", 0644, d_tracer,
9304 tr, &tracing_iter_fops);
9306 trace_create_file("trace", 0644, d_tracer,
9309 trace_create_file("trace_pipe", 0444, d_tracer,
9310 tr, &tracing_pipe_fops);
9312 trace_create_file("buffer_size_kb", 0644, d_tracer,
9313 tr, &tracing_entries_fops);
9315 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9316 tr, &tracing_total_entries_fops);
9318 trace_create_file("free_buffer", 0200, d_tracer,
9319 tr, &tracing_free_buffer_fops);
9321 trace_create_file("trace_marker", 0220, d_tracer,
9322 tr, &tracing_mark_fops);
9324 file = __find_event_file(tr, "ftrace", "print");
9325 if (file && file->dir)
9326 trace_create_file("trigger", 0644, file->dir, file,
9327 &event_trigger_fops);
9328 tr->trace_marker_file = file;
9330 trace_create_file("trace_marker_raw", 0220, d_tracer,
9331 tr, &tracing_mark_raw_fops);
9333 trace_create_file("trace_clock", 0644, d_tracer, tr,
9336 trace_create_file("tracing_on", 0644, d_tracer,
9337 tr, &rb_simple_fops);
9339 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9340 &trace_time_stamp_mode_fops);
9342 tr->buffer_percent = 50;
9344 trace_create_file("buffer_percent", 0444, d_tracer,
9345 tr, &buffer_percent_fops);
9347 create_trace_options_dir(tr);
9349 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9350 trace_create_maxlat_file(tr, d_tracer);
9353 if (ftrace_create_function_files(tr, d_tracer))
9354 MEM_FAIL(1, "Could not allocate function filter files");
9356 #ifdef CONFIG_TRACER_SNAPSHOT
9357 trace_create_file("snapshot", 0644, d_tracer,
9358 tr, &snapshot_fops);
9361 trace_create_file("error_log", 0644, d_tracer,
9362 tr, &tracing_err_log_fops);
9364 for_each_tracing_cpu(cpu)
9365 tracing_init_tracefs_percpu(tr, cpu);
9367 ftrace_init_tracefs(tr, d_tracer);
9370 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9372 struct vfsmount *mnt;
9373 struct file_system_type *type;
9376 * To maintain backward compatibility for tools that mount
9377 * debugfs to get to the tracing facility, tracefs is automatically
9378 * mounted to the debugfs/tracing directory.
9380 type = get_fs_type("tracefs");
9383 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9384 put_filesystem(type);
9393 * tracing_init_dentry - initialize top level trace array
9395 * This is called when creating files or directories in the tracing
9396 * directory. It is called via fs_initcall() by any of the boot up code
9397 * and returns 0 if the top level tracing directory is available.
9399 int tracing_init_dentry(void)
9401 struct trace_array *tr = &global_trace;
9403 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9404 pr_warn("Tracing disabled due to lockdown\n");
9408 /* The top level trace array uses NULL as parent */
9412 if (WARN_ON(!tracefs_initialized()))
9416 * As there may still be users that expect the tracing
9417 * files to exist in debugfs/tracing, we must automount
9418 * the tracefs file system there, so older tools still
9419 * work with the newer kernel.
9421 tr->dir = debugfs_create_automount("tracing", NULL,
9422 trace_automount, NULL);
9427 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9428 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9430 static struct workqueue_struct *eval_map_wq __initdata;
9431 static struct work_struct eval_map_work __initdata;
9433 static void __init eval_map_work_func(struct work_struct *work)
9437 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9438 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9441 static int __init trace_eval_init(void)
9443 INIT_WORK(&eval_map_work, eval_map_work_func);
9445 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9447 pr_err("Unable to allocate eval_map_wq\n");
9449 eval_map_work_func(&eval_map_work);
9453 queue_work(eval_map_wq, &eval_map_work);
9457 static int __init trace_eval_sync(void)
9459 /* Make sure the eval map updates are finished */
9461 destroy_workqueue(eval_map_wq);
9465 late_initcall_sync(trace_eval_sync);
9468 #ifdef CONFIG_MODULES
9469 static void trace_module_add_evals(struct module *mod)
9471 if (!mod->num_trace_evals)
9475 * Modules with bad taint do not have events created; do
9476 * not bother with enums either.
9478 if (trace_module_has_bad_taint(mod))
9481 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9484 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9485 static void trace_module_remove_evals(struct module *mod)
9487 union trace_eval_map_item *map;
9488 union trace_eval_map_item **last = &trace_eval_maps;
9490 if (!mod->num_trace_evals)
9493 mutex_lock(&trace_eval_mutex);
9495 map = trace_eval_maps;
9498 if (map->head.mod == mod)
9500 map = trace_eval_jmp_to_tail(map);
9501 last = &map->tail.next;
9502 map = map->tail.next;
9507 *last = trace_eval_jmp_to_tail(map)->tail.next;
9510 mutex_unlock(&trace_eval_mutex);
9513 static inline void trace_module_remove_evals(struct module *mod) { }
9514 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9516 static int trace_module_notify(struct notifier_block *self,
9517 unsigned long val, void *data)
9519 struct module *mod = data;
9522 case MODULE_STATE_COMING:
9523 trace_module_add_evals(mod);
9525 case MODULE_STATE_GOING:
9526 trace_module_remove_evals(mod);
9533 static struct notifier_block trace_module_nb = {
9534 .notifier_call = trace_module_notify,
9537 #endif /* CONFIG_MODULES */
9539 static __init int tracer_init_tracefs(void)
9543 trace_access_lock_init();
9545 ret = tracing_init_dentry();
9551 init_tracer_tracefs(&global_trace, NULL);
9552 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9554 trace_create_file("tracing_thresh", 0644, NULL,
9555 &global_trace, &tracing_thresh_fops);
9557 trace_create_file("README", 0444, NULL,
9558 NULL, &tracing_readme_fops);
9560 trace_create_file("saved_cmdlines", 0444, NULL,
9561 NULL, &tracing_saved_cmdlines_fops);
9563 trace_create_file("saved_cmdlines_size", 0644, NULL,
9564 NULL, &tracing_saved_cmdlines_size_fops);
9566 trace_create_file("saved_tgids", 0444, NULL,
9567 NULL, &tracing_saved_tgids_fops);
9571 trace_create_eval_file(NULL);
9573 #ifdef CONFIG_MODULES
9574 register_module_notifier(&trace_module_nb);
9577 #ifdef CONFIG_DYNAMIC_FTRACE
9578 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9579 NULL, &tracing_dyn_info_fops);
9582 create_trace_instances(NULL);
9584 update_tracer_options(&global_trace);
9589 fs_initcall(tracer_init_tracefs);
9591 static int trace_panic_handler(struct notifier_block *this,
9592 unsigned long event, void *unused)
9594 if (ftrace_dump_on_oops)
9595 ftrace_dump(ftrace_dump_on_oops);
9599 static struct notifier_block trace_panic_notifier = {
9600 .notifier_call = trace_panic_handler,
9602 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9605 static int trace_die_handler(struct notifier_block *self,
9611 if (ftrace_dump_on_oops)
9612 ftrace_dump(ftrace_dump_on_oops);
9620 static struct notifier_block trace_die_notifier = {
9621 .notifier_call = trace_die_handler,
9626 * printk is limited to a max of 1024; we really don't need it that big.
9627 * Nothing should be printing 1000 characters anyway.
9629 #define TRACE_MAX_PRINT 1000
9632 * Define here KERN_TRACE so that we have one place to modify
9633 * it if we decide to change what log level the ftrace dump
9636 #define KERN_TRACE KERN_EMERG
9639 trace_printk_seq(struct trace_seq *s)
9641 /* Probably should print a warning here. */
9642 if (s->seq.len >= TRACE_MAX_PRINT)
9643 s->seq.len = TRACE_MAX_PRINT;
9646 * More paranoid code. Although the buffer size is set to
9647 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9648 * an extra layer of protection.
9650 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9651 s->seq.len = s->seq.size - 1;
9653 /* should be NUL-terminated, but we are paranoid. */
9654 s->buffer[s->seq.len] = 0;
9656 printk(KERN_TRACE "%s", s->buffer);
9661 void trace_init_global_iter(struct trace_iterator *iter)
9663 iter->tr = &global_trace;
9664 iter->trace = iter->tr->current_trace;
9665 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9666 iter->array_buffer = &global_trace.array_buffer;
9668 if (iter->trace && iter->trace->open)
9669 iter->trace->open(iter);
9671 /* Annotate start of buffers if we had overruns */
9672 if (ring_buffer_overruns(iter->array_buffer->buffer))
9673 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9675 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9676 if (trace_clocks[iter->tr->clock_id].in_ns)
9677 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9680 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9682 /* use static because iter can be a bit big for the stack */
9683 static struct trace_iterator iter;
9684 static atomic_t dump_running;
9685 struct trace_array *tr = &global_trace;
9686 unsigned int old_userobj;
9687 unsigned long flags;
9690 /* Only allow one dump user at a time. */
9691 if (atomic_inc_return(&dump_running) != 1) {
9692 atomic_dec(&dump_running);
9697 * Always turn off tracing when we dump.
9698 * We don't need to show trace output of what happens
9699 * between multiple crashes.
9701 * If the user does a sysrq-z, then they can re-enable
9702 * tracing with echo 1 > tracing_on.
9706 local_irq_save(flags);
9707 printk_nmi_direct_enter();
9709 /* Simulate the iterator */
9710 trace_init_global_iter(&iter);
9711 /* Cannot use kmalloc for iter.temp and iter.fmt */
9712 iter.temp = static_temp_buf;
9713 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9714 iter.fmt = static_fmt_buf;
9715 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9717 for_each_tracing_cpu(cpu) {
9718 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9721 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9723 /* don't look at user memory in panic mode */
9724 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9726 switch (oops_dump_mode) {
9728 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9731 iter.cpu_file = raw_smp_processor_id();
9736 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9737 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9740 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9742 /* Did function tracer already get disabled? */
9743 if (ftrace_is_dead()) {
9744 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9745 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9749 * We need to stop all tracing on all CPUs to read
9750 * the next buffer. This is a bit expensive, but is
9751 * not done often. We read all that we can,
9752 * and then release the locks again.
9755 while (!trace_empty(&iter)) {
9758 printk(KERN_TRACE "---------------------------------\n");
9762 trace_iterator_reset(&iter);
9763 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9765 if (trace_find_next_entry_inc(&iter) != NULL) {
9768 ret = print_trace_line(&iter);
9769 if (ret != TRACE_TYPE_NO_CONSUME)
9770 trace_consume(&iter);
9772 touch_nmi_watchdog();
9774 trace_printk_seq(&iter.seq);
9778 printk(KERN_TRACE " (ftrace buffer empty)\n");
9780 printk(KERN_TRACE "---------------------------------\n");
9783 tr->trace_flags |= old_userobj;
9785 for_each_tracing_cpu(cpu) {
9786 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9788 atomic_dec(&dump_running);
9789 printk_nmi_direct_exit();
9790 local_irq_restore(flags);
9792 EXPORT_SYMBOL_GPL(ftrace_dump);
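/*
 * Sketch of a direct caller: a crash path that wants only the buffer
 * of the CPU that hit the problem, rather than every CPU's:
 *
 *	ftrace_dump(DUMP_ORIG);
 */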
9794 #define WRITE_BUFSIZE 4096
9796 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9797 size_t count, loff_t *ppos,
9798 int (*createfn)(const char *))
9800 char *kbuf, *buf, *tmp;
9805 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9809 while (done < count) {
9810 size = count - done;
9812 if (size >= WRITE_BUFSIZE)
9813 size = WRITE_BUFSIZE - 1;
9815 if (copy_from_user(kbuf, buffer + done, size)) {
9822 tmp = strchr(buf, '\n');
9825 size = tmp - buf + 1;
9828 if (done + size < count) {
9831 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9832 pr_warn("Line is too long: should be less than %d\n",
9840 /* Remove comments */
9841 tmp = strchr(buf, '#');
9846 ret = createfn(buf);
9851 } while (done < count);
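/*
 * Example: a single write of "p:my_probe do_sys_open\n-:my_probe\n"
 * is split on newlines, '#' comments are stripped, and createfn() is
 * invoked once per resulting line (the kprobe syntax here is just an
 * illustration of one user of this helper).
 */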
9861 __init static int tracer_alloc_buffers(void)
9867 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9868 pr_warn("Tracing disabled due to lockdown\n");
9873 * Make sure we don't accidentally add more trace options
9874 * than we have bits for.
9876 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9878 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9881 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9882 goto out_free_buffer_mask;
9884 /* Only allocate trace_printk buffers if a trace_printk exists */
9885 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9886 /* Must be called before global_trace.buffer is allocated */
9887 trace_printk_init_buffers();
9889 /* To save memory, keep the ring buffer size to its minimum */
9890 if (ring_buffer_expanded)
9891 ring_buf_size = trace_buf_size;
9895 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9896 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9898 raw_spin_lock_init(&global_trace.start_lock);
9901 * The prepare callback allocates some memory for the ring buffer. We
9902 * don't free the buffer if the CPU goes down. If we were to free
9903 * the buffer, then the user would lose any trace that was in the
9904 * buffer. The memory will be removed once the "instance" is removed.
9906 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9907 "trace/RB:preapre", trace_rb_cpu_prepare,
9910 goto out_free_cpumask;
9911 /* Used for event triggers */
9913 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9915 goto out_rm_hp_state;
9917 if (trace_create_savedcmd() < 0)
9918 goto out_free_temp_buffer;
9920 /* TODO: make the number of buffers hot pluggable with CPUs */
9921 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9922 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9923 goto out_free_savedcmd;
9926 if (global_trace.buffer_disabled)
9929 if (trace_boot_clock) {
9930 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9932 pr_warn("Trace clock %s not defined, going back to default\n",
9937 * register_tracer() might reference current_trace, so it
9938 * needs to be set before we register anything. This is
9939 * just a bootstrap of current_trace anyway.
9941 global_trace.current_trace = &nop_trace;
9943 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9945 ftrace_init_global_array_ops(&global_trace);
9947 init_trace_flags_index(&global_trace);
9949 register_tracer(&nop_trace);
9951 /* Function tracing may start here (via kernel command line) */
9952 init_function_trace();
9954 /* All seems OK, enable tracing */
9955 tracing_disabled = 0;
9957 atomic_notifier_chain_register(&panic_notifier_list,
9958 &trace_panic_notifier);
9960 register_die_notifier(&trace_die_notifier);
9962 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9964 INIT_LIST_HEAD(&global_trace.systems);
9965 INIT_LIST_HEAD(&global_trace.events);
9966 INIT_LIST_HEAD(&global_trace.hist_vars);
9967 INIT_LIST_HEAD(&global_trace.err_log);
9968 list_add(&global_trace.list, &ftrace_trace_arrays);
9970 apply_trace_boot_options();
9972 register_snapshot_cmd();
9979 free_saved_cmdlines_buffer(savedcmd);
9980 out_free_temp_buffer:
9981 ring_buffer_free(temp_buffer);
9983 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9985 free_cpumask_var(global_trace.tracing_cpumask);
9986 out_free_buffer_mask:
9987 free_cpumask_var(tracing_buffer_mask);
9992 void __init early_trace_init(void)
9994 if (tracepoint_printk) {
9995 tracepoint_print_iter =
9996 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9997 if (MEM_FAIL(!tracepoint_print_iter,
9998 "Failed to allocate trace iterator\n"))
9999 tracepoint_printk = 0;
10001 static_key_enable(&tracepoint_printk_key.key);
10003 tracer_alloc_buffers();
10006 void __init trace_init(void)
10008 trace_event_init();
10011 __init static void clear_boot_tracer(void)
10014 * The default bootup tracer name lives in an init section.
10015 * This function is called late in boot. If we did not
10016 * find the boot tracer, then clear it out, to prevent
10017 * later registration from accessing the buffer that is
10018 * about to be freed.
10020 if (!default_bootup_tracer)
10023 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10024 default_bootup_tracer);
10025 default_bootup_tracer = NULL;
10028 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10029 __init static void tracing_set_default_clock(void)
10031 /* sched_clock_stable() is determined in late_initcall */
10032 if (!trace_boot_clock && !sched_clock_stable()) {
10033 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10034 pr_warn("Can not set tracing clock due to lockdown\n");
10038 printk(KERN_WARNING
10039 "Unstable clock detected, switching default tracing clock to \"global\"\n"
10040 "If you want to keep using the local clock, then add:\n"
10041 " \"trace_clock=local\"\n"
10042 "on the kernel command line\n");
10043 tracing_set_clock(&global_trace, "global");
10047 static inline void tracing_set_default_clock(void) { }
10050 __init static int late_trace_init(void)
10052 if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10053 static_key_disable(&tracepoint_printk_key.key);
10054 tracepoint_printk = 0;
10057 tracing_set_default_clock();
10058 clear_boot_tracer();
10062 late_initcall_sync(late_trace_init);