1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring-buffer to count the
64 * entries inserted during the selftest, although some concurrent
65 * insertions into the ring-buffer, such as trace_printk, could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If boot-time tracing (including tracers/events set via the kernel
72 * cmdline) is running, we do not want to run the selftests.
74 bool __read_mostly tracing_selftest_disabled;
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
86 /* Pipe tracepoints to printk */
87 struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
104 * To prevent the comm cache from being overwritten when no
105 * tracing is active, only save the comm when a trace event
108 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
111 * Kill all tracing for good (never come back).
112 * It is initialized to 1 but will turn to zero if the initialization
113 * of the tracer is successful. But that is the only place that sets
116 static int tracing_disabled = 1;
118 cpumask_var_t __read_mostly tracing_buffer_mask;
121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
124 * is set, then ftrace_dump is called. This will output the contents
125 * of the ftrace buffers to the console. This is very useful for
126 * capturing traces that lead to crashes and outputting them to a
129 * It is off by default, but you can enable it either by specifying
130 * "ftrace_dump_on_oops" on the kernel command line, or by setting
131 * /proc/sys/kernel/ftrace_dump_on_oops
132 * Set to 1 if you want to dump the buffers of all CPUs
133 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
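 *
 * For example (illustrative): booting with "ftrace_dump_on_oops" dumps the
 * buffers of all CPUs, while "ftrace_dump_on_oops=orig_cpu" dumps only the
 * buffer of the CPU that triggered the oops (see set_ftrace_dump_on_oops()
 * below for the accepted values).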
136 enum ftrace_dump_mode ftrace_dump_on_oops;
138 /* When set, tracing will stop when a WARN*() is hit */
139 int __disable_trace_on_warning;
141 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
142 /* Map of enums to their values, for "eval_map" file */
143 struct trace_eval_map_head {
145 unsigned long length;
148 union trace_eval_map_item;
150 struct trace_eval_map_tail {
152 * "end" is first and points to NULL as it must be different
153 * from "mod" or "eval_string"
155 union trace_eval_map_item *next;
156 const char *end; /* points to NULL */
159 static DEFINE_MUTEX(trace_eval_mutex);
162 * The trace_eval_maps are saved in an array with two extra elements,
163 * one at the beginning, and one at the end. The beginning item contains
164 * the count of the saved maps (head.length), and the module they
165 * belong to if not built in (head.mod). The ending item contains a
166 * pointer to the next array of saved eval_map items.
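 *
 * Illustrative layout (a sketch, not part of the original comment):
 *
 *   trace_eval_maps -> [ head ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail ]
 *
 * where head.length == N, head.mod is the owning module (or NULL when
 * built in), and tail.next points to the next saved array (or NULL).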
168 union trace_eval_map_item {
169 struct trace_eval_map map;
170 struct trace_eval_map_head head;
171 struct trace_eval_map_tail tail;
174 static union trace_eval_map_item *trace_eval_maps;
175 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
177 int tracing_set_tracer(struct trace_array *tr, const char *buf);
178 static void ftrace_trace_userstack(struct trace_array *tr,
179 struct trace_buffer *buffer,
180 unsigned int trace_ctx);
182 #define MAX_TRACER_SIZE 100
183 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
184 static char *default_bootup_tracer;
186 static bool allocate_snapshot;
188 static int __init set_cmdline_ftrace(char *str)
190 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
191 default_bootup_tracer = bootup_tracer_buf;
192 /* We are using ftrace early, expand it */
193 ring_buffer_expanded = true;
196 __setup("ftrace=", set_cmdline_ftrace);
198 static int __init set_ftrace_dump_on_oops(char *str)
200 if (*str++ != '=' || !*str || !strcmp("1", str)) {
201 ftrace_dump_on_oops = DUMP_ALL;
205 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
206 ftrace_dump_on_oops = DUMP_ORIG;
212 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
214 static int __init stop_trace_on_warning(char *str)
216 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
217 __disable_trace_on_warning = 1;
220 __setup("traceoff_on_warning", stop_trace_on_warning);
222 static int __init boot_alloc_snapshot(char *str)
224 allocate_snapshot = true;
225 /* We also need the main ring buffer expanded */
226 ring_buffer_expanded = true;
229 __setup("alloc_snapshot", boot_alloc_snapshot);
232 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
234 static int __init set_trace_boot_options(char *str)
236 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
239 __setup("trace_options=", set_trace_boot_options);
241 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
242 static char *trace_boot_clock __initdata;
244 static int __init set_trace_boot_clock(char *str)
246 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
247 trace_boot_clock = trace_boot_clock_buf;
250 __setup("trace_clock=", set_trace_boot_clock);
252 static int __init set_tracepoint_printk(char *str)
254 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
255 tracepoint_printk = 1;
258 __setup("tp_printk", set_tracepoint_printk);
260 static int __init set_tracepoint_printk_stop(char *str)
262 tracepoint_printk_stop_on_boot = true;
265 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
267 unsigned long long ns2usecs(u64 nsec)
275 trace_process_export(struct trace_export *export,
276 struct ring_buffer_event *event, int flag)
278 struct trace_entry *entry;
279 unsigned int size = 0;
281 if (export->flags & flag) {
282 entry = ring_buffer_event_data(event);
283 size = ring_buffer_event_length(event);
284 export->write(export, entry, size);
288 static DEFINE_MUTEX(ftrace_export_lock);
290 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
292 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
293 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
294 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
296 static inline void ftrace_exports_enable(struct trace_export *export)
298 if (export->flags & TRACE_EXPORT_FUNCTION)
299 static_branch_inc(&trace_function_exports_enabled);
301 if (export->flags & TRACE_EXPORT_EVENT)
302 static_branch_inc(&trace_event_exports_enabled);
304 if (export->flags & TRACE_EXPORT_MARKER)
305 static_branch_inc(&trace_marker_exports_enabled);
308 static inline void ftrace_exports_disable(struct trace_export *export)
310 if (export->flags & TRACE_EXPORT_FUNCTION)
311 static_branch_dec(&trace_function_exports_enabled);
313 if (export->flags & TRACE_EXPORT_EVENT)
314 static_branch_dec(&trace_event_exports_enabled);
316 if (export->flags & TRACE_EXPORT_MARKER)
317 static_branch_dec(&trace_marker_exports_enabled);
320 static void ftrace_exports(struct ring_buffer_event *event, int flag)
322 struct trace_export *export;
324 preempt_disable_notrace();
326 export = rcu_dereference_raw_check(ftrace_exports_list);
328 trace_process_export(export, event, flag);
329 export = rcu_dereference_raw_check(export->next);
332 preempt_enable_notrace();
336 add_trace_export(struct trace_export **list, struct trace_export *export)
338 rcu_assign_pointer(export->next, *list);
340 * We are adding the export to the list, but another
341 * CPU might be walking that list. We need to make sure
342 * the export->next pointer is valid before another CPU sees
343 * the export pointer included in the list.
345 rcu_assign_pointer(*list, export);
349 rm_trace_export(struct trace_export **list, struct trace_export *export)
351 struct trace_export **p;
353 for (p = list; *p != NULL; p = &(*p)->next)
360 rcu_assign_pointer(*p, (*p)->next);
366 add_ftrace_export(struct trace_export **list, struct trace_export *export)
368 ftrace_exports_enable(export);
370 add_trace_export(list, export);
374 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
378 ret = rm_trace_export(list, export);
379 ftrace_exports_disable(export);
384 int register_ftrace_export(struct trace_export *export)
386 if (WARN_ON_ONCE(!export->write))
389 mutex_lock(&ftrace_export_lock);
391 add_ftrace_export(&ftrace_exports_list, export);
393 mutex_unlock(&ftrace_export_lock);
397 EXPORT_SYMBOL_GPL(register_ftrace_export);
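/*
 * Illustrative usage of the export API (a sketch; "my_export" and "my_write"
 * are hypothetical names, not part of this file):
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,		// callback invoked with each binary entry
 *		.flags	= TRACE_EXPORT_EVENT,	// or TRACE_EXPORT_FUNCTION, TRACE_EXPORT_MARKER
 *	};
 *
 *	register_ftrace_export(&my_export);	// fails if .write is NULL
 *	...
 *	unregister_ftrace_export(&my_export);
 */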
399 int unregister_ftrace_export(struct trace_export *export)
403 mutex_lock(&ftrace_export_lock);
405 ret = rm_ftrace_export(&ftrace_exports_list, export);
407 mutex_unlock(&ftrace_export_lock);
411 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
413 /* trace_flags holds trace_options default values */
414 #define TRACE_DEFAULT_FLAGS \
415 (FUNCTION_DEFAULT_FLAGS | \
416 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
417 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
418 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
419 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
422 /* trace_options that are only supported by global_trace */
423 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
424 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
426 /* trace_flags that are default zero for instances */
427 #define ZEROED_TRACE_FLAGS \
428 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
431 * The global_trace is the descriptor that holds the top-level tracing
432 * buffers for the live tracing.
434 static struct trace_array global_trace = {
435 .trace_flags = TRACE_DEFAULT_FLAGS,
438 LIST_HEAD(ftrace_trace_arrays);
440 int trace_array_get(struct trace_array *this_tr)
442 struct trace_array *tr;
445 mutex_lock(&trace_types_lock);
446 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
453 mutex_unlock(&trace_types_lock);
458 static void __trace_array_put(struct trace_array *this_tr)
460 WARN_ON(!this_tr->ref);
465 * trace_array_put - Decrement the reference counter for this trace array.
466 * @this_tr : pointer to the trace array
468 * NOTE: Use this when we no longer need the trace array returned by
469 * trace_array_get_by_name(). This ensures the trace array can be later
473 void trace_array_put(struct trace_array *this_tr)
478 mutex_lock(&trace_types_lock);
479 __trace_array_put(this_tr);
480 mutex_unlock(&trace_types_lock);
482 EXPORT_SYMBOL_GPL(trace_array_put);
484 int tracing_check_open_get_tr(struct trace_array *tr)
488 ret = security_locked_down(LOCKDOWN_TRACEFS);
492 if (tracing_disabled)
495 if (tr && trace_array_get(tr) < 0)
501 int call_filter_check_discard(struct trace_event_call *call, void *rec,
502 struct trace_buffer *buffer,
503 struct ring_buffer_event *event)
505 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
506 !filter_match_preds(call->filter, rec)) {
507 __trace_event_discard_commit(buffer, event);
514 void trace_free_pid_list(struct trace_pid_list *pid_list)
516 vfree(pid_list->pids);
521 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
522 * @filtered_pids: The list of pids to check
523 * @search_pid: The PID to find in @filtered_pids
525 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
528 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
531 * If pid_max changed after filtered_pids was created, we
532 * by default ignore all pids greater than the previous pid_max.
534 if (search_pid >= filtered_pids->pid_max)
537 return test_bit(search_pid, filtered_pids->pids);
541 * trace_ignore_this_task - should a task be ignored for tracing
542 * @filtered_pids: The list of pids to check
543 * @filtered_no_pids: The list of pids not to be traced
544 * @task: The task that should be ignored if not filtered
546 * Checks if @task should be traced or not from @filtered_pids.
547 * Returns true if @task should *NOT* be traced.
548 * Returns false if @task should be traced.
551 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
552 struct trace_pid_list *filtered_no_pids,
553 struct task_struct *task)
556 * If filtered_no_pids is not empty, and the task's pid is listed
557 * in filtered_no_pids, then return true.
558 * Otherwise, if filtered_pids is empty, that means we can
559 * trace all tasks. If it has content, then only trace pids
560 * within filtered_pids.
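 *
 * For example (illustrative): if @filtered_pids contains only pid 100 and
 * @filtered_no_pids is empty, every task except pid 100 is ignored; if both
 * lists are empty, no task is ignored.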
563 return (filtered_pids &&
564 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
566 trace_find_filtered_pid(filtered_no_pids, task->pid));
570 * trace_filter_add_remove_task - Add or remove a task from a pid_list
571 * @pid_list: The list to modify
572 * @self: The current task for fork or NULL for exit
573 * @task: The task to add or remove
575 * If adding a task, if @self is defined, the task is only added if @self
576 * is also included in @pid_list. This happens on fork and tasks should
577 * only be added when the parent is listed. If @self is NULL, then the
578 * @task pid will be removed from the list, which would happen on exit
581 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
582 struct task_struct *self,
583 struct task_struct *task)
588 /* For forks, we only add if the forking task is listed */
590 if (!trace_find_filtered_pid(pid_list, self->pid))
594 /* Sorry, but we don't support pid_max changing after setting */
595 if (task->pid >= pid_list->pid_max)
598 /* "self" is set for forks, and NULL for exits */
600 set_bit(task->pid, pid_list->pids);
602 clear_bit(task->pid, pid_list->pids);
606 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
607 * @pid_list: The pid list to show
608 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
609 * @pos: The position of the file
611 * This is used by the seq_file "next" operation to iterate the pids
612 * listed in a trace_pid_list structure.
614 * Returns the pid+1 as we want to display pid of zero, but NULL would
615 * stop the iteration.
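 *
 * For example (illustrative): if pid 0 is set in the list, this returns
 * (void *)1; since seq_file treats a NULL return as the end of iteration,
 * the +1 offset is what allows pid 0 to be displayed at all.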
617 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
619 unsigned long pid = (unsigned long)v;
623 /* pid already is +1 of the actual previous bit */
624 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
626 /* Return pid + 1 to allow zero to be represented */
627 if (pid < pid_list->pid_max)
628 return (void *)(pid + 1);
634 * trace_pid_start - Used for seq_file to start reading pid lists
635 * @pid_list: The pid list to show
636 * @pos: The position of the file
638 * This is used by seq_file "start" operation to start the iteration
641 * Returns the pid+1 as we want to display pid of zero, but NULL would
642 * stop the iteration.
644 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
649 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
650 if (pid >= pid_list->pid_max)
653 /* Return pid + 1 so that zero can be the exit value */
654 for (pid++; pid && l < *pos;
655 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
661 * trace_pid_show - show the current pid in seq_file processing
662 * @m: The seq_file structure to write into
663 * @v: A void pointer of the pid (+1) value to display
665 * Can be directly used by seq_file operations to display the current
668 int trace_pid_show(struct seq_file *m, void *v)
670 unsigned long pid = (unsigned long)v - 1;
672 seq_printf(m, "%lu\n", pid);
676 /* 128 should be much more than enough */
677 #define PID_BUF_SIZE 127
679 int trace_pid_write(struct trace_pid_list *filtered_pids,
680 struct trace_pid_list **new_pid_list,
681 const char __user *ubuf, size_t cnt)
683 struct trace_pid_list *pid_list;
684 struct trace_parser parser;
692 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
696 * Always recreate a new array. The write is an all or nothing
697 * operation. Always create a new array when adding new pids by
698 * the user. If the operation fails, then the current list is
701 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
703 trace_parser_put(&parser);
707 pid_list->pid_max = READ_ONCE(pid_max);
709 /* Only truncating will shrink pid_max */
710 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
711 pid_list->pid_max = filtered_pids->pid_max;
713 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
714 if (!pid_list->pids) {
715 trace_parser_put(&parser);
721 /* copy the current bits to the new max */
722 for_each_set_bit(pid, filtered_pids->pids,
723 filtered_pids->pid_max) {
724 set_bit(pid, pid_list->pids);
733 ret = trace_get_user(&parser, ubuf, cnt, &pos);
734 if (ret < 0 || !trace_parser_loaded(&parser))
742 if (kstrtoul(parser.buffer, 0, &val))
744 if (val >= pid_list->pid_max)
749 set_bit(pid, pid_list->pids);
752 trace_parser_clear(&parser);
755 trace_parser_put(&parser);
758 trace_free_pid_list(pid_list);
763 /* Cleared the list of pids */
764 trace_free_pid_list(pid_list);
769 *new_pid_list = pid_list;
774 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
778 /* Early boot up does not have a buffer yet */
780 return trace_clock_local();
782 ts = ring_buffer_time_stamp(buf->buffer);
783 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
788 u64 ftrace_now(int cpu)
790 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
794 * tracing_is_enabled - Show if global_trace has been enabled
796 * Shows if the global trace has been enabled or not. It uses the
797 * mirror flag "buffer_disabled" to be used in fast paths such as for
798 * the irqsoff tracer. But it may be inaccurate due to races. If you
799 * need to know the accurate state, use tracing_is_on() which is a little
800 * slower, but accurate.
802 int tracing_is_enabled(void)
805 * For quick access (irqsoff uses this in fast path), just
806 * return the mirror variable of the state of the ring buffer.
807 * It's a little racy, but we don't really care.
810 return !global_trace.buffer_disabled;
814 * trace_buf_size is the size in bytes that is allocated
815 * for a buffer. Note, the number of bytes is always rounded
818 * This number is purposely set to a low number of 16384.
819 * If the dump on oops happens, it will be much appreciated
820 * not to have to wait for all that output. Anyway, this is
821 * configurable at both boot time and run time.
823 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
825 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
827 /* trace_types holds a link list of available tracers. */
828 static struct tracer *trace_types __read_mostly;
831 * trace_types_lock is used to protect the trace_types list.
833 DEFINE_MUTEX(trace_types_lock);
836 * serialize access to the ring buffer
838 * The ring buffer serializes readers, but that is only low-level protection.
839 * The validity of the events (returned by ring_buffer_peek() etc.)
840 * is not protected by the ring buffer.
842 * The content of events may become garbage if we allow other processes to
843 * consume these events concurrently:
844 * A) the page of the consumed events may become a normal page
845 * (not a reader page) in the ring buffer, and this page will be rewritten
846 * by the events producer.
847 * B) the page of the consumed events may become a page for splice_read,
848 * and this page will be returned to the system.
850 * These primitives allow multi-process access to different per-cpu ring buffers.
853 * These primitives don't distinguish read-only and read-consume access.
854 * Multiple read-only accesses are also serialized.
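 *
 * Locking scheme used below: when the per-cpu locks are in use, a reader of
 * a single CPU buffer takes all_cpu_access_lock for read plus that CPU's
 * cpu_access_lock mutex, while a reader of the whole buffer takes
 * all_cpu_access_lock for write; otherwise a single access_lock mutex
 * serializes everything.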
858 static DECLARE_RWSEM(all_cpu_access_lock);
859 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
861 static inline void trace_access_lock(int cpu)
863 if (cpu == RING_BUFFER_ALL_CPUS) {
864 /* gain it for accessing the whole ring buffer. */
865 down_write(&all_cpu_access_lock);
867 /* gain it for accessing a cpu ring buffer. */
869 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
870 down_read(&all_cpu_access_lock);
872 /* Secondly block other access to this @cpu ring buffer. */
873 mutex_lock(&per_cpu(cpu_access_lock, cpu));
877 static inline void trace_access_unlock(int cpu)
879 if (cpu == RING_BUFFER_ALL_CPUS) {
880 up_write(&all_cpu_access_lock);
882 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
883 up_read(&all_cpu_access_lock);
887 static inline void trace_access_lock_init(void)
891 for_each_possible_cpu(cpu)
892 mutex_init(&per_cpu(cpu_access_lock, cpu));
897 static DEFINE_MUTEX(access_lock);
899 static inline void trace_access_lock(int cpu)
902 mutex_lock(&access_lock);
905 static inline void trace_access_unlock(int cpu)
908 mutex_unlock(&access_lock);
911 static inline void trace_access_lock_init(void)
917 #ifdef CONFIG_STACKTRACE
918 static void __ftrace_trace_stack(struct trace_buffer *buffer,
919 unsigned int trace_ctx,
920 int skip, struct pt_regs *regs);
921 static inline void ftrace_trace_stack(struct trace_array *tr,
922 struct trace_buffer *buffer,
923 unsigned int trace_ctx,
924 int skip, struct pt_regs *regs);
927 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
928 unsigned int trace_ctx,
929 int skip, struct pt_regs *regs)
932 static inline void ftrace_trace_stack(struct trace_array *tr,
933 struct trace_buffer *buffer,
934 unsigned long trace_ctx,
935 int skip, struct pt_regs *regs)
941 static __always_inline void
942 trace_event_setup(struct ring_buffer_event *event,
943 int type, unsigned int trace_ctx)
945 struct trace_entry *ent = ring_buffer_event_data(event);
947 tracing_generic_entry_update(ent, type, trace_ctx);
950 static __always_inline struct ring_buffer_event *
951 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
954 unsigned int trace_ctx)
956 struct ring_buffer_event *event;
958 event = ring_buffer_lock_reserve(buffer, len);
960 trace_event_setup(event, type, trace_ctx);
965 void tracer_tracing_on(struct trace_array *tr)
967 if (tr->array_buffer.buffer)
968 ring_buffer_record_on(tr->array_buffer.buffer);
970 * This flag is looked at when buffers haven't been allocated
971 * yet, or by some tracers (like irqsoff) that just want to
972 * know if the ring buffer has been disabled, but can handle
973 * races where it gets disabled while we still do a record.
974 * As the check is in the fast path of the tracers, it is more
975 * important to be fast than accurate.
977 tr->buffer_disabled = 0;
978 /* Make the flag seen by readers */
983 * tracing_on - enable tracing buffers
985 * This function enables tracing buffers that may have been
986 * disabled with tracing_off.
988 void tracing_on(void)
990 tracer_tracing_on(&global_trace);
992 EXPORT_SYMBOL_GPL(tracing_on);
995 static __always_inline void
996 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
998 __this_cpu_write(trace_taskinfo_save, true);
1000 /* If this is the temp buffer, we need to commit fully */
1001 if (this_cpu_read(trace_buffered_event) == event) {
1002 /* Length is in event->array[0] */
1003 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1004 /* Release the temp buffer */
1005 this_cpu_dec(trace_buffered_event_cnt);
1007 ring_buffer_unlock_commit(buffer, event);
1011 * __trace_puts - write a constant string into the trace buffer.
1012 * @ip: The address of the caller
1013 * @str: The constant string to write
1014 * @size: The size of the string.
1016 int __trace_puts(unsigned long ip, const char *str, int size)
1018 struct ring_buffer_event *event;
1019 struct trace_buffer *buffer;
1020 struct print_entry *entry;
1021 unsigned int trace_ctx;
1024 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1027 if (unlikely(tracing_selftest_running || tracing_disabled))
1030 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1032 trace_ctx = tracing_gen_ctx();
1033 buffer = global_trace.array_buffer.buffer;
1034 ring_buffer_nest_start(buffer);
1035 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1042 entry = ring_buffer_event_data(event);
1045 memcpy(&entry->buf, str, size);
1047 /* Add a newline if necessary */
1048 if (entry->buf[size - 1] != '\n') {
1049 entry->buf[size] = '\n';
1050 entry->buf[size + 1] = '\0';
1052 entry->buf[size] = '\0';
1054 __buffer_unlock_commit(buffer, event);
1055 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1057 ring_buffer_nest_end(buffer);
1060 EXPORT_SYMBOL_GPL(__trace_puts);
1063 * __trace_bputs - write the pointer to a constant string into trace buffer
1064 * @ip: The address of the caller
1065 * @str: The constant string to write into the buffer
1067 int __trace_bputs(unsigned long ip, const char *str)
1069 struct ring_buffer_event *event;
1070 struct trace_buffer *buffer;
1071 struct bputs_entry *entry;
1072 unsigned int trace_ctx;
1073 int size = sizeof(struct bputs_entry);
1076 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1079 if (unlikely(tracing_selftest_running || tracing_disabled))
1082 trace_ctx = tracing_gen_ctx();
1083 buffer = global_trace.array_buffer.buffer;
1085 ring_buffer_nest_start(buffer);
1086 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1091 entry = ring_buffer_event_data(event);
1095 __buffer_unlock_commit(buffer, event);
1096 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1100 ring_buffer_nest_end(buffer);
1103 EXPORT_SYMBOL_GPL(__trace_bputs);
1105 #ifdef CONFIG_TRACER_SNAPSHOT
1106 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1109 struct tracer *tracer = tr->current_trace;
1110 unsigned long flags;
1113 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1114 internal_trace_puts("*** snapshot is being ignored ***\n");
1118 if (!tr->allocated_snapshot) {
1119 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1120 internal_trace_puts("*** stopping trace here! ***\n");
1125 /* Note, snapshot can not be used when the tracer uses it */
1126 if (tracer->use_max_tr) {
1127 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1128 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1132 local_irq_save(flags);
1133 update_max_tr(tr, current, smp_processor_id(), cond_data);
1134 local_irq_restore(flags);
1137 void tracing_snapshot_instance(struct trace_array *tr)
1139 tracing_snapshot_instance_cond(tr, NULL);
1143 * tracing_snapshot - take a snapshot of the current buffer.
1145 * This causes a swap between the snapshot buffer and the current live
1146 * tracing buffer. You can use this to take snapshots of the live
1147 * trace when some condition is triggered, but continue to trace.
1149 * Note, make sure to allocate the snapshot either with
1150 * tracing_snapshot_alloc(), or manually with:
1151 *	echo 1 > /sys/kernel/debug/tracing/snapshot
1153 * If the snapshot buffer is not allocated, this will stop tracing,
1154 * basically making a permanent snapshot.
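 *
 * Illustrative usage (a sketch, not part of the original kerneldoc):
 *
 *	tracing_snapshot_alloc();	// allocate the spare buffer up front
 *	...
 *	if (some_condition)		// "some_condition" is hypothetical
 *		tracing_snapshot();	// swap the live buffer into the snapshot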
1156 void tracing_snapshot(void)
1158 struct trace_array *tr = &global_trace;
1160 tracing_snapshot_instance(tr);
1162 EXPORT_SYMBOL_GPL(tracing_snapshot);
1165 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1166 * @tr: The tracing instance to snapshot
1167 * @cond_data: The data to be tested conditionally, and possibly saved
1169 * This is the same as tracing_snapshot() except that the snapshot is
1170 * conditional - the snapshot will only happen if the
1171 * cond_snapshot.update() implementation receiving the cond_data
1172 * returns true, which means that the trace array's cond_snapshot
1173 * update() operation used the cond_data to determine whether the
1174 * snapshot should be taken, and if it was, presumably saved it along
1175 * with the snapshot.
1177 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1179 tracing_snapshot_instance_cond(tr, cond_data);
1181 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1184 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1185 * @tr: The tracing instance
1187 * When the user enables a conditional snapshot using
1188 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1189 * with the snapshot. This accessor is used to retrieve it.
1191 * Should not be called from cond_snapshot.update(), since it takes
1192 * the tr->max_lock lock, which the code calling
1193 * cond_snapshot.update() already holds.
1195 * Returns the cond_data associated with the trace array's snapshot.
1197 void *tracing_cond_snapshot_data(struct trace_array *tr)
1199 void *cond_data = NULL;
1201 arch_spin_lock(&tr->max_lock);
1203 if (tr->cond_snapshot)
1204 cond_data = tr->cond_snapshot->cond_data;
1206 arch_spin_unlock(&tr->max_lock);
1210 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1212 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1213 struct array_buffer *size_buf, int cpu_id);
1214 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1216 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1220 if (!tr->allocated_snapshot) {
1222 /* allocate spare buffer */
1223 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1224 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1228 tr->allocated_snapshot = true;
1234 static void free_snapshot(struct trace_array *tr)
1237 * We don't free the ring buffer; instead, we resize it because
1238 * the max_tr ring buffer has some state (e.g. ring->clock) and
1239 * we want to preserve it.
1241 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1242 set_buffer_entries(&tr->max_buffer, 1);
1243 tracing_reset_online_cpus(&tr->max_buffer);
1244 tr->allocated_snapshot = false;
1248 * tracing_alloc_snapshot - allocate snapshot buffer.
1250 * This only allocates the snapshot buffer if it isn't already
1251 * allocated - it doesn't also take a snapshot.
1253 * This is meant to be used in cases where the snapshot buffer needs
1254 * to be set up for events that can't sleep but need to be able to
1255 * trigger a snapshot.
1257 int tracing_alloc_snapshot(void)
1259 struct trace_array *tr = &global_trace;
1262 ret = tracing_alloc_snapshot_instance(tr);
1267 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1270 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1272 * This is similar to tracing_snapshot(), but it will allocate the
1273 * snapshot buffer if it isn't already allocated. Use this only
1274 * where it is safe to sleep, as the allocation may sleep.
1276 * This causes a swap between the snapshot buffer and the current live
1277 * tracing buffer. You can use this to take snapshots of the live
1278 * trace when some condition is triggered, but continue to trace.
1280 void tracing_snapshot_alloc(void)
1284 ret = tracing_alloc_snapshot();
1290 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1293 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1294 * @tr: The tracing instance
1295 * @cond_data: User data to associate with the snapshot
1296 * @update: Implementation of the cond_snapshot update function
1298 * Check whether the conditional snapshot for the given instance has
1299 * already been enabled, or if the current tracer is already using a
1300 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1301 * save the cond_data and update function inside.
1303 * Returns 0 if successful, error otherwise.
1305 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1306 cond_update_fn_t update)
1308 struct cond_snapshot *cond_snapshot;
1311 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1315 cond_snapshot->cond_data = cond_data;
1316 cond_snapshot->update = update;
1318 mutex_lock(&trace_types_lock);
1320 ret = tracing_alloc_snapshot_instance(tr);
1324 if (tr->current_trace->use_max_tr) {
1330 * The cond_snapshot can only change to NULL without the
1331 * trace_types_lock. We don't care if we race with it going
1332 * to NULL, but we want to make sure that it's not set to
1333 * something other than NULL when we get here, which we can
1334 * do safely with only holding the trace_types_lock and not
1335 * having to take the max_lock.
1337 if (tr->cond_snapshot) {
1342 arch_spin_lock(&tr->max_lock);
1343 tr->cond_snapshot = cond_snapshot;
1344 arch_spin_unlock(&tr->max_lock);
1346 mutex_unlock(&trace_types_lock);
1351 mutex_unlock(&trace_types_lock);
1352 kfree(cond_snapshot);
1355 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1358 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1359 * @tr: The tracing instance
1361 * Check whether the conditional snapshot for the given instance is
1362 * enabled; if so, free the cond_snapshot associated with it,
1363 * otherwise return -EINVAL.
1365 * Returns 0 if successful, error otherwise.
1367 int tracing_snapshot_cond_disable(struct trace_array *tr)
1371 arch_spin_lock(&tr->max_lock);
1373 if (!tr->cond_snapshot)
1376 kfree(tr->cond_snapshot);
1377 tr->cond_snapshot = NULL;
1380 arch_spin_unlock(&tr->max_lock);
1384 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1386 void tracing_snapshot(void)
1388 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1390 EXPORT_SYMBOL_GPL(tracing_snapshot);
1391 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1393 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1395 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1396 int tracing_alloc_snapshot(void)
1398 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1401 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1402 void tracing_snapshot_alloc(void)
1407 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1408 void *tracing_cond_snapshot_data(struct trace_array *tr)
1412 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1413 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1417 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1418 int tracing_snapshot_cond_disable(struct trace_array *tr)
1422 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1423 #endif /* CONFIG_TRACER_SNAPSHOT */
1425 void tracer_tracing_off(struct trace_array *tr)
1427 if (tr->array_buffer.buffer)
1428 ring_buffer_record_off(tr->array_buffer.buffer);
1430 * This flag is looked at when buffers haven't been allocated
1431 * yet, or by some tracers (like irqsoff) that just want to
1432 * know if the ring buffer has been disabled, but can handle
1433 * races where it gets disabled while we still do a record.
1434 * As the check is in the fast path of the tracers, it is more
1435 * important to be fast than accurate.
1437 tr->buffer_disabled = 1;
1438 /* Make the flag seen by readers */
1443 * tracing_off - turn off tracing buffers
1445 * This function stops the tracing buffers from recording data.
1446 * It does not disable any overhead the tracers themselves may
1447 * be causing. This function simply causes all recording to
1448 * the ring buffers to fail.
1450 void tracing_off(void)
1452 tracer_tracing_off(&global_trace);
1454 EXPORT_SYMBOL_GPL(tracing_off);
1456 void disable_trace_on_warning(void)
1458 if (__disable_trace_on_warning) {
1459 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1460 "Disabling tracing due to warning\n");
1466 * tracer_tracing_is_on - show the real state of the ring buffer
1467 * @tr : the trace array whose ring buffer state is wanted
1469 * Shows the real state of the ring buffer: whether it is enabled or not.
1471 bool tracer_tracing_is_on(struct trace_array *tr)
1473 if (tr->array_buffer.buffer)
1474 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1475 return !tr->buffer_disabled;
1479 * tracing_is_on - show state of ring buffers enabled
1481 int tracing_is_on(void)
1483 return tracer_tracing_is_on(&global_trace);
1485 EXPORT_SYMBOL_GPL(tracing_is_on);
1487 static int __init set_buf_size(char *str)
1489 unsigned long buf_size;
1493 buf_size = memparse(str, &str);
1494 /* nr_entries can not be zero */
1497 trace_buf_size = buf_size;
1500 __setup("trace_buf_size=", set_buf_size);
1502 static int __init set_tracing_thresh(char *str)
1504 unsigned long threshold;
1509 ret = kstrtoul(str, 0, &threshold);
1512 tracing_thresh = threshold * 1000;
1515 __setup("tracing_thresh=", set_tracing_thresh);
1517 unsigned long nsecs_to_usecs(unsigned long nsecs)
1519 return nsecs / 1000;
1523 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1524 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1525 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1526 * of strings in the order that the evals (enum) were defined.
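 *
 * For example (illustrative): with "#define C(a, b) b", an entry such as
 * C(PRINT_PARENT, "print-parent") contributes the string "print-parent"
 * to the trace_options[] array below.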
1531 /* These must match the bit positions in trace_iterator_flags */
1532 static const char *trace_options[] = {
1540 int in_ns; /* is this clock in nanoseconds? */
1541 } trace_clocks[] = {
1542 { trace_clock_local, "local", 1 },
1543 { trace_clock_global, "global", 1 },
1544 { trace_clock_counter, "counter", 0 },
1545 { trace_clock_jiffies, "uptime", 0 },
1546 { trace_clock, "perf", 1 },
1547 { ktime_get_mono_fast_ns, "mono", 1 },
1548 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1549 { ktime_get_boot_fast_ns, "boot", 1 },
1553 bool trace_clock_in_ns(struct trace_array *tr)
1555 if (trace_clocks[tr->clock_id].in_ns)
1562 * trace_parser_get_init - gets the buffer for trace parser
1564 int trace_parser_get_init(struct trace_parser *parser, int size)
1566 memset(parser, 0, sizeof(*parser));
1568 parser->buffer = kmalloc(size, GFP_KERNEL);
1569 if (!parser->buffer)
1572 parser->size = size;
1577 * trace_parser_put - frees the buffer for trace parser
1579 void trace_parser_put(struct trace_parser *parser)
1581 kfree(parser->buffer);
1582 parser->buffer = NULL;
1586 * trace_get_user - reads the user input string separated by space
1587 * (matched by isspace(ch))
1589 * For each string found, the 'struct trace_parser' is updated,
1590 * and the function returns.
1592 * Returns number of bytes read.
1594 * See kernel/trace/trace.h for 'struct trace_parser' details.
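 *
 * Illustrative example (not part of the original kerneldoc): if user space
 * writes "123 456", the first call fills parser->buffer with "123" and a
 * later call fills it with "456"; parser->cont is set when a token was cut
 * short by the end of the write, so the next call continues that token.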
1596 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1597 size_t cnt, loff_t *ppos)
1604 trace_parser_clear(parser);
1606 ret = get_user(ch, ubuf++);
1614 * The parser is not finished with the last write,
1615 * continue reading the user input without skipping spaces.
1617 if (!parser->cont) {
1618 /* skip white space */
1619 while (cnt && isspace(ch)) {
1620 ret = get_user(ch, ubuf++);
1629 /* only spaces were written */
1630 if (isspace(ch) || !ch) {
1637 /* read the non-space input */
1638 while (cnt && !isspace(ch) && ch) {
1639 if (parser->idx < parser->size - 1)
1640 parser->buffer[parser->idx++] = ch;
1645 ret = get_user(ch, ubuf++);
1652 /* We either got finished input or we have to wait for another call. */
1653 if (isspace(ch) || !ch) {
1654 parser->buffer[parser->idx] = 0;
1655 parser->cont = false;
1656 } else if (parser->idx < parser->size - 1) {
1657 parser->cont = true;
1658 parser->buffer[parser->idx++] = ch;
1659 /* Make sure the parsed string always terminates with '\0'. */
1660 parser->buffer[parser->idx] = 0;
1673 /* TODO add a seq_buf_to_buffer() */
1674 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1678 if (trace_seq_used(s) <= s->seq.readpos)
1681 len = trace_seq_used(s) - s->seq.readpos;
1684 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1686 s->seq.readpos += cnt;
1690 unsigned long __read_mostly tracing_thresh;
1691 static const struct file_operations tracing_max_lat_fops;
1693 #ifdef LATENCY_FS_NOTIFY
1695 static struct workqueue_struct *fsnotify_wq;
1697 static void latency_fsnotify_workfn(struct work_struct *work)
1699 struct trace_array *tr = container_of(work, struct trace_array,
1701 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1704 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1706 struct trace_array *tr = container_of(iwork, struct trace_array,
1708 queue_work(fsnotify_wq, &tr->fsnotify_work);
1711 static void trace_create_maxlat_file(struct trace_array *tr,
1712 struct dentry *d_tracer)
1714 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1715 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1716 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1717 d_tracer, &tr->max_latency,
1718 &tracing_max_lat_fops);
1721 __init static int latency_fsnotify_init(void)
1723 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1724 WQ_UNBOUND | WQ_HIGHPRI, 0);
1726 pr_err("Unable to allocate tr_max_lat_wq\n");
1732 late_initcall_sync(latency_fsnotify_init);
1734 void latency_fsnotify(struct trace_array *tr)
1739 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1740 * possible that we are called from __schedule() or do_idle(), which
1741 * could cause a deadlock.
1743 irq_work_queue(&tr->fsnotify_irqwork);
1747 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1748 * defined(CONFIG_FSNOTIFY)
1752 #define trace_create_maxlat_file(tr, d_tracer) \
1753 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1754 &tr->max_latency, &tracing_max_lat_fops)
1758 #ifdef CONFIG_TRACER_MAX_TRACE
1760 * Copy the new maximum trace into the separate maximum-trace
1761 * structure. (this way the maximum trace is permanently saved,
1762 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1765 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1767 struct array_buffer *trace_buf = &tr->array_buffer;
1768 struct array_buffer *max_buf = &tr->max_buffer;
1769 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1770 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1773 max_buf->time_start = data->preempt_timestamp;
1775 max_data->saved_latency = tr->max_latency;
1776 max_data->critical_start = data->critical_start;
1777 max_data->critical_end = data->critical_end;
1779 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1780 max_data->pid = tsk->pid;
1782 * If tsk == current, then use current_uid(), as that does not use
1783 * RCU. The irq tracer can be called out of RCU scope.
1786 max_data->uid = current_uid();
1788 max_data->uid = task_uid(tsk);
1790 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1791 max_data->policy = tsk->policy;
1792 max_data->rt_priority = tsk->rt_priority;
1794 /* record this task's comm */
1795 tracing_record_cmdline(tsk);
1796 latency_fsnotify(tr);
1800 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1802 * @tsk: the task with the latency
1803 * @cpu: The cpu that initiated the trace.
1804 * @cond_data: User data associated with a conditional snapshot
1806 * Flip the buffers between the @tr and the max_tr and record information
1807 * about which task was the cause of this latency.
1810 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1816 WARN_ON_ONCE(!irqs_disabled());
1818 if (!tr->allocated_snapshot) {
1819 /* Only the nop tracer should hit this when disabling */
1820 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1824 arch_spin_lock(&tr->max_lock);
1826 /* Inherit the recordable setting from array_buffer */
1827 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1828 ring_buffer_record_on(tr->max_buffer.buffer);
1830 ring_buffer_record_off(tr->max_buffer.buffer);
1832 #ifdef CONFIG_TRACER_SNAPSHOT
1833 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1836 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1838 __update_max_tr(tr, tsk, cpu);
1841 arch_spin_unlock(&tr->max_lock);
1845 * update_max_tr_single - only copy one trace over, and reset the rest
1847 * @tsk: task with the latency
1848 * @cpu: the cpu of the buffer to copy.
1850 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1853 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1860 WARN_ON_ONCE(!irqs_disabled());
1861 if (!tr->allocated_snapshot) {
1862 /* Only the nop tracer should hit this when disabling */
1863 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1867 arch_spin_lock(&tr->max_lock);
1869 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1871 if (ret == -EBUSY) {
1873 * We failed to swap the buffer due to a commit taking
1874 * place on this CPU. We fail to record, but we reset
1875 * the max trace buffer (no one writes directly to it)
1876 * and flag that it failed.
1878 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1879 "Failed to swap buffers due to commit in progress\n");
1882 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1884 __update_max_tr(tr, tsk, cpu);
1885 arch_spin_unlock(&tr->max_lock);
1887 #endif /* CONFIG_TRACER_MAX_TRACE */
1889 static int wait_on_pipe(struct trace_iterator *iter, int full)
1891 /* Iterators are static, they should be filled or empty */
1892 if (trace_buffer_iter(iter, iter->cpu_file))
1895 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1899 #ifdef CONFIG_FTRACE_STARTUP_TEST
1900 static bool selftests_can_run;
1902 struct trace_selftests {
1903 struct list_head list;
1904 struct tracer *type;
1907 static LIST_HEAD(postponed_selftests);
1909 static int save_selftest(struct tracer *type)
1911 struct trace_selftests *selftest;
1913 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1917 selftest->type = type;
1918 list_add(&selftest->list, &postponed_selftests);
1922 static int run_tracer_selftest(struct tracer *type)
1924 struct trace_array *tr = &global_trace;
1925 struct tracer *saved_tracer = tr->current_trace;
1928 if (!type->selftest || tracing_selftest_disabled)
1932 * If a tracer registers early in boot up (before scheduling is
1933 * initialized and such), then do not run its selftests yet.
1934 * Instead, run it a little later in the boot process.
1936 if (!selftests_can_run)
1937 return save_selftest(type);
1939 if (!tracing_is_on()) {
1940 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1946 * Run a selftest on this tracer.
1947 * Here we reset the trace buffer, and set the current
1948 * tracer to be this tracer. The tracer can then run some
1949 * internal tracing to verify that everything is in order.
1950 * If we fail, we do not register this tracer.
1952 tracing_reset_online_cpus(&tr->array_buffer);
1954 tr->current_trace = type;
1956 #ifdef CONFIG_TRACER_MAX_TRACE
1957 if (type->use_max_tr) {
1958 /* If we expanded the buffers, make sure the max is expanded too */
1959 if (ring_buffer_expanded)
1960 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1961 RING_BUFFER_ALL_CPUS);
1962 tr->allocated_snapshot = true;
1966 /* the test is responsible for initializing and enabling */
1967 pr_info("Testing tracer %s: ", type->name);
1968 ret = type->selftest(type, tr);
1969 /* the test is responsible for resetting too */
1970 tr->current_trace = saved_tracer;
1972 printk(KERN_CONT "FAILED!\n");
1973 /* Add the warning after printing 'FAILED' */
1977 /* Only reset on passing, to avoid touching corrupted buffers */
1978 tracing_reset_online_cpus(&tr->array_buffer);
1980 #ifdef CONFIG_TRACER_MAX_TRACE
1981 if (type->use_max_tr) {
1982 tr->allocated_snapshot = false;
1984 /* Shrink the max buffer again */
1985 if (ring_buffer_expanded)
1986 ring_buffer_resize(tr->max_buffer.buffer, 1,
1987 RING_BUFFER_ALL_CPUS);
1991 printk(KERN_CONT "PASSED\n");
1995 static __init int init_trace_selftests(void)
1997 struct trace_selftests *p, *n;
1998 struct tracer *t, **last;
2001 selftests_can_run = true;
2003 mutex_lock(&trace_types_lock);
2005 if (list_empty(&postponed_selftests))
2008 pr_info("Running postponed tracer tests:\n");
2010 tracing_selftest_running = true;
2011 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2012 /* This loop can take minutes when sanitizers are enabled, so
2013 * let's make sure we allow RCU processing.
2016 ret = run_tracer_selftest(p->type);
2017 /* If the test fails, then warn and remove from available_tracers */
2019 WARN(1, "tracer: %s failed selftest, disabling\n",
2021 last = &trace_types;
2022 for (t = trace_types; t; t = t->next) {
2033 tracing_selftest_running = false;
2036 mutex_unlock(&trace_types_lock);
2040 core_initcall(init_trace_selftests);
2042 static inline int run_tracer_selftest(struct tracer *type)
2046 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2048 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2050 static void __init apply_trace_boot_options(void);
2053 * register_tracer - register a tracer with the ftrace system.
2054 * @type: the plugin for the tracer
2056 * Register a new plugin tracer.
2058 int __init register_tracer(struct tracer *type)
2064 pr_info("Tracer must have a name\n");
2068 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2069 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2073 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2074 pr_warn("Can not register tracer %s due to lockdown\n",
2079 mutex_lock(&trace_types_lock);
2081 tracing_selftest_running = true;
2083 for (t = trace_types; t; t = t->next) {
2084 if (strcmp(type->name, t->name) == 0) {
2086 pr_info("Tracer %s already registered\n",
2093 if (!type->set_flag)
2094 type->set_flag = &dummy_set_flag;
2096 /* allocate a dummy tracer_flags */
2097 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2102 type->flags->val = 0;
2103 type->flags->opts = dummy_tracer_opt;
2105 if (!type->flags->opts)
2106 type->flags->opts = dummy_tracer_opt;
2108 /* store the tracer for __set_tracer_option */
2109 type->flags->trace = type;
2111 ret = run_tracer_selftest(type);
2115 type->next = trace_types;
2117 add_tracer_options(&global_trace, type);
2120 tracing_selftest_running = false;
2121 mutex_unlock(&trace_types_lock);
2123 if (ret || !default_bootup_tracer)
2126 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2129 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2130 /* Do we want this tracer to start on bootup? */
2131 tracing_set_tracer(&global_trace, type->name);
2132 default_bootup_tracer = NULL;
2134 apply_trace_boot_options();
2136 /* disable other selftests, since this will break it. */
2137 disable_tracing_selftest("running a tracer");
2143 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2145 struct trace_buffer *buffer = buf->buffer;
2150 ring_buffer_record_disable(buffer);
2152 /* Make sure all commits have finished */
2154 ring_buffer_reset_cpu(buffer, cpu);
2156 ring_buffer_record_enable(buffer);
2159 void tracing_reset_online_cpus(struct array_buffer *buf)
2161 struct trace_buffer *buffer = buf->buffer;
2166 ring_buffer_record_disable(buffer);
2168 /* Make sure all commits have finished */
2171 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2173 ring_buffer_reset_online_cpus(buffer);
2175 ring_buffer_record_enable(buffer);
2178 /* Must have trace_types_lock held */
2179 void tracing_reset_all_online_cpus(void)
2181 struct trace_array *tr;
2183 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2184 if (!tr->clear_trace)
2186 tr->clear_trace = false;
2187 tracing_reset_online_cpus(&tr->array_buffer);
2188 #ifdef CONFIG_TRACER_MAX_TRACE
2189 tracing_reset_online_cpus(&tr->max_buffer);
2194 static int *tgid_map;
2196 #define SAVED_CMDLINES_DEFAULT 128
2197 #define NO_CMDLINE_MAP UINT_MAX
2198 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2199 struct saved_cmdlines_buffer {
2200 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2201 unsigned *map_cmdline_to_pid;
2202 unsigned cmdline_num;
2204 char *saved_cmdlines;
2206 static struct saved_cmdlines_buffer *savedcmd;
2208 /* temporarily disable recording */
2209 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2211 static inline char *get_saved_cmdlines(int idx)
2213 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2216 static inline void set_cmdline(int idx, const char *cmdline)
2218 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2221 static int allocate_cmdlines_buffer(unsigned int val,
2222 struct saved_cmdlines_buffer *s)
2224 s->map_cmdline_to_pid = kmalloc_array(val,
2225 sizeof(*s->map_cmdline_to_pid),
2227 if (!s->map_cmdline_to_pid)
2230 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2231 if (!s->saved_cmdlines) {
2232 kfree(s->map_cmdline_to_pid);
2237 s->cmdline_num = val;
2238 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2239 sizeof(s->map_pid_to_cmdline));
2240 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2241 val * sizeof(*s->map_cmdline_to_pid));
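/*
 * Note: the memset()s above rely on NO_CMDLINE_MAP being UINT_MAX, so
 * every byte is 0xff and byte-wise filling yields the intended value in
 * each unsigned element.
 */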
2246 static int trace_create_savedcmd(void)
2250 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2254 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2264 int is_tracing_stopped(void)
2266 return global_trace.stop_count;
2270 * tracing_start - quick start of the tracer
2272 * If tracing is enabled but was stopped by tracing_stop,
2273 * this will start the tracer back up.
2275 void tracing_start(void)
2277 struct trace_buffer *buffer;
2278 unsigned long flags;
2280 if (tracing_disabled)
2283 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2284 if (--global_trace.stop_count) {
2285 if (global_trace.stop_count < 0) {
2286 /* Someone screwed up their debugging */
2288 global_trace.stop_count = 0;
2293 /* Prevent the buffers from switching */
2294 arch_spin_lock(&global_trace.max_lock);
2296 buffer = global_trace.array_buffer.buffer;
2298 ring_buffer_record_enable(buffer);
2300 #ifdef CONFIG_TRACER_MAX_TRACE
2301 buffer = global_trace.max_buffer.buffer;
2303 ring_buffer_record_enable(buffer);
2306 arch_spin_unlock(&global_trace.max_lock);
2309 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2312 static void tracing_start_tr(struct trace_array *tr)
2314 struct trace_buffer *buffer;
2315 unsigned long flags;
2317 if (tracing_disabled)
2320 /* If global, we need to also start the max tracer */
2321 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2322 return tracing_start();
2324 raw_spin_lock_irqsave(&tr->start_lock, flags);
2326 if (--tr->stop_count) {
2327 if (tr->stop_count < 0) {
2328 /* Someone screwed up their debugging */
2335 buffer = tr->array_buffer.buffer;
2337 ring_buffer_record_enable(buffer);
2340 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2344 * tracing_stop - quick stop of the tracer
2346 * Light weight way to stop tracing. Use in conjunction with
2349 void tracing_stop(void)
2351 struct trace_buffer *buffer;
2352 unsigned long flags;
2354 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2355 if (global_trace.stop_count++)
2358 /* Prevent the buffers from switching */
2359 arch_spin_lock(&global_trace.max_lock);
2361 buffer = global_trace.array_buffer.buffer;
2363 ring_buffer_record_disable(buffer);
2365 #ifdef CONFIG_TRACER_MAX_TRACE
2366 buffer = global_trace.max_buffer.buffer;
2368 ring_buffer_record_disable(buffer);
2371 arch_spin_unlock(&global_trace.max_lock);
2374 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2377 static void tracing_stop_tr(struct trace_array *tr)
2379 struct trace_buffer *buffer;
2380 unsigned long flags;
2382 /* If global, we need to also stop the max tracer */
2383 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2384 return tracing_stop();
2386 raw_spin_lock_irqsave(&tr->start_lock, flags);
2387 if (tr->stop_count++)
2390 buffer = tr->array_buffer.buffer;
2392 ring_buffer_record_disable(buffer);
2395 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2398 static int trace_save_cmdline(struct task_struct *tsk)
2402 /* treat recording of idle task as a success */
2406 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2409 * It's not the end of the world if we don't get
2410 * the lock, but we also don't want to spin
2411 * nor do we want to disable interrupts,
2412 * so if we miss here, then better luck next time.
2414 if (!arch_spin_trylock(&trace_cmdline_lock))
2417 idx = savedcmd->map_pid_to_cmdline[tpid];
2418 if (idx == NO_CMDLINE_MAP) {
2419 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2421 savedcmd->map_pid_to_cmdline[tpid] = idx;
2422 savedcmd->cmdline_idx = idx;
2425 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2426 set_cmdline(idx, tsk->comm);
2428 arch_spin_unlock(&trace_cmdline_lock);
2433 static void __trace_find_cmdline(int pid, char comm[])
2439 strcpy(comm, "<idle>");
2443 if (WARN_ON_ONCE(pid < 0)) {
2444 strcpy(comm, "<XXX>");
2448 tpid = pid & (PID_MAX_DEFAULT - 1);
2449 map = savedcmd->map_pid_to_cmdline[tpid];
2450 if (map != NO_CMDLINE_MAP) {
2451 tpid = savedcmd->map_cmdline_to_pid[map];
2453 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2457 strcpy(comm, "<...>");
2460 void trace_find_cmdline(int pid, char comm[])
2463 arch_spin_lock(&trace_cmdline_lock);
2465 __trace_find_cmdline(pid, comm);
2467 arch_spin_unlock(&trace_cmdline_lock);
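/*
 * Illustrative sketch (hypothetical caller): how the lookup above is
 * typically consumed when resolving a pid to a comm for trace output.
 */
static void __maybe_unused example_resolve_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	/*
	 * comm is now "<idle>" for pid 0, the saved comm while the pid is
	 * still in the cmdline cache, or "<...>" once the entry has been
	 * overwritten by another task.
	 */
	pr_info("pid %d ran as %s\n", pid, comm);
}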
2471 int trace_find_tgid(int pid)
2473 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2476 return tgid_map[pid];
2479 static int trace_save_tgid(struct task_struct *tsk)
2481 /* treat recording of idle task as a success */
2485 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2488 tgid_map[tsk->pid] = tsk->tgid;
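/*
 * Illustrative sketch (hypothetical caller): tgid_map is indexed directly
 * by pid, so recording and lookup are a plain store and load.
 */
static void __maybe_unused example_resolve_tgid(struct task_struct *tsk)
{
	int tgid;

	trace_save_tgid(tsk);			/* tgid_map[tsk->pid] = tsk->tgid */
	tgid = trace_find_tgid(tsk->pid);	/* tsk->tgid, or 0 when the map is
						 * not allocated or the pid is
						 * out of range */
	pr_info("pid %d belongs to tgid %d\n", tsk->pid, tgid);
}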
2492 static bool tracing_record_taskinfo_skip(int flags)
2494 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2496 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2498 if (!__this_cpu_read(trace_taskinfo_save))
2504 * tracing_record_taskinfo - record the task info of a task
2506 * @task: task to record
2507 * @flags: TRACE_RECORD_CMDLINE for recording comm
2508 * TRACE_RECORD_TGID for recording tgid
2510 void tracing_record_taskinfo(struct task_struct *task, int flags)
2514 if (tracing_record_taskinfo_skip(flags))
2518 * Record as much task information as possible. If some fail, continue
2519 * to try to record the others.
2521 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2522 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2524 /* If recording any information failed, retry again soon. */
2528 __this_cpu_write(trace_taskinfo_save, false);
2532 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2534 * @prev: previous task during sched_switch
2535 * @next: next task during sched_switch
2536 * @flags: TRACE_RECORD_CMDLINE for recording comm
2537 * TRACE_RECORD_TGID for recording tgid
2539 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2540 struct task_struct *next, int flags)
2544 if (tracing_record_taskinfo_skip(flags))
2548 * Record as much task information as possible. If some fail, continue
2549 * to try to record the others.
2551 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2552 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2553 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2554 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2556 /* If recording any information failed, retry again soon. */
2560 __this_cpu_write(trace_taskinfo_save, false);
2563 /* Helpers to record a specific task information */
2564 void tracing_record_cmdline(struct task_struct *task)
2566 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2569 void tracing_record_tgid(struct task_struct *task)
2571 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2575 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2576 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2577 * simplifies those functions and keeps them in sync.
2579 enum print_line_t trace_handle_return(struct trace_seq *s)
2581 return trace_seq_has_overflowed(s) ?
2582 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2584 EXPORT_SYMBOL_GPL(trace_handle_return);
2586 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2588 unsigned int trace_flags = irqs_status;
2591 pc = preempt_count();
2594 trace_flags |= TRACE_FLAG_NMI;
2595 if (pc & HARDIRQ_MASK)
2596 trace_flags |= TRACE_FLAG_HARDIRQ;
2597 if (in_serving_softirq())
2598 trace_flags |= TRACE_FLAG_SOFTIRQ;
2600 if (tif_need_resched())
2601 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2602 if (test_preempt_need_resched())
2603 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2604 return (trace_flags << 16) | (pc & 0xff);
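/*
 * Illustrative sketch (hypothetical decode, mirroring the return value
 * above): the low byte of trace_ctx carries the preempt count, and the
 * TRACE_FLAG_* bits sit from bit 16 upward.
 */
static void __maybe_unused example_unpack_trace_ctx(unsigned int trace_ctx)
{
	unsigned int pc = trace_ctx & 0xff;	/* preempt_count() & 0xff */
	unsigned int flags = trace_ctx >> 16;	/* TRACE_FLAG_HARDIRQ, ... */

	pr_info("preempt depth %u, flags %#x\n", pc, flags);
}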
2607 struct ring_buffer_event *
2608 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2611 unsigned int trace_ctx)
2613 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2616 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2617 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2618 static int trace_buffered_event_ref;
2621 * trace_buffered_event_enable - enable buffering events
2623 * When events are being filtered, it is quicker to use a temporary
2624 * buffer to write the event data into if there's a likely chance
2625 * that it will not be committed. The discard of the ring buffer
2626 * is not as fast as committing, and is much slower than copying
2629 * When an event is to be filtered, allocate per cpu buffers to
2630 * write the event data into, and if the event is filtered and discarded
2631 * it is simply dropped, otherwise, the entire data is to be committed
2634 void trace_buffered_event_enable(void)
2636 struct ring_buffer_event *event;
2640 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2642 if (trace_buffered_event_ref++)
2645 for_each_tracing_cpu(cpu) {
2646 page = alloc_pages_node(cpu_to_node(cpu),
2647 GFP_KERNEL | __GFP_NORETRY, 0);
2651 event = page_address(page);
2652 memset(event, 0, sizeof(*event));
2654 per_cpu(trace_buffered_event, cpu) = event;
2657 if (cpu == smp_processor_id() &&
2658 __this_cpu_read(trace_buffered_event) !=
2659 per_cpu(trace_buffered_event, cpu))
2666 trace_buffered_event_disable();
2669 static void enable_trace_buffered_event(void *data)
2671 /* Probably not needed, but do it anyway */
2673 this_cpu_dec(trace_buffered_event_cnt);
2676 static void disable_trace_buffered_event(void *data)
2678 this_cpu_inc(trace_buffered_event_cnt);
2682 * trace_buffered_event_disable - disable buffering events
2684 * When a filter is removed, it is faster to not use the buffered
2685 * events, and to commit directly into the ring buffer. Free up
2686 * the temp buffers when there are no more users. This requires
2687 * special synchronization with current events.
2689 void trace_buffered_event_disable(void)
2693 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2695 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2698 if (--trace_buffered_event_ref)
2702 /* For each CPU, set the buffer as used. */
2703 smp_call_function_many(tracing_buffer_mask,
2704 disable_trace_buffered_event, NULL, 1);
2707 /* Wait for all current users to finish */
2710 for_each_tracing_cpu(cpu) {
2711 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2712 per_cpu(trace_buffered_event, cpu) = NULL;
2715 * Make sure trace_buffered_event is NULL before clearing
2716 * trace_buffered_event_cnt.
2721 /* Do the work on each cpu */
2722 smp_call_function_many(tracing_buffer_mask,
2723 enable_trace_buffered_event, NULL, 1);
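/*
 * Illustrative sketch (hypothetical caller; the real users live elsewhere
 * in the tracing code): the two helpers above are reference counted and
 * must be called with event_mutex held.
 */
static void __maybe_unused example_buffered_event_scope(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* first user allocates per-cpu pages */

	/* ... install or update event filters ... */

	trace_buffered_event_disable();	/* last user frees the pages again */
	mutex_unlock(&event_mutex);
}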
2727 static struct trace_buffer *temp_buffer;
2729 struct ring_buffer_event *
2730 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2731 struct trace_event_file *trace_file,
2732 int type, unsigned long len,
2733 unsigned int trace_ctx)
2735 struct ring_buffer_event *entry;
2736 struct trace_array *tr = trace_file->tr;
2739 *current_rb = tr->array_buffer.buffer;
2741 if (!tr->no_filter_buffering_ref &&
2742 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2743 (entry = this_cpu_read(trace_buffered_event))) {
2745 * Filtering is on, so try to use the per cpu buffer first.
2746 * This buffer will simulate a ring_buffer_event,
2747 * where the type_len is zero and the array[0] will
2748 * hold the full length.
2749 * (see include/linux/ring_buffer.h for details on
2750 * how the ring_buffer_event is structured).
2752 * Using a temp buffer during filtering and copying it
2753 * on a matched filter is quicker than writing directly
2754 * into the ring buffer and then discarding it when
2755 * it doesn't match. That is because the discard
2756 * requires several atomic operations to get right.
2757 * Copying on match and doing nothing on a failed match
2758 * is still quicker than no copy on match, but having
2759 * to discard out of the ring buffer on a failed match.
2761 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2763 val = this_cpu_inc_return(trace_buffered_event_cnt);
2766 * Preemption is disabled, but interrupts and NMIs
2767 * can still come in now. If that happens after
2768 * the above increment, then it will have to go
2769 * back to the old method of allocating the event
2770 * on the ring buffer, and if the filter fails, it
2771 * will have to call ring_buffer_discard_commit()
2774 * Need to also check the unlikely case that the
2775 * length is bigger than the temp buffer size.
2776 * If that happens, then the reserve is pretty much
2777 * guaranteed to fail, as the ring buffer currently
2778 * only allows events less than a page. But that may
2779 * change in the future, so let the ring buffer reserve
2780 * handle the failure in that case.
2782 if (val == 1 && likely(len <= max_len)) {
2783 trace_event_setup(entry, type, trace_ctx);
2784 entry->array[0] = len;
2787 this_cpu_dec(trace_buffered_event_cnt);
2790 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2793 * If tracing is off, but we have triggers enabled,
2794 * we still need to look at the event data. Use the temp_buffer
2795 * to store the trace event for the trigger to use. It's recursion
2796 * safe and will not be recorded anywhere.
2798 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2799 *current_rb = temp_buffer;
2800 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2805 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
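/*
 * Illustrative sketch (simplified and hypothetical; the real callers are
 * the generated trace event code): the reserve above is always paired
 * with a commit, or with a discard when the filter rejects the event.
 */
static void __maybe_unused example_reserve_and_fill(struct trace_event_file *trace_file,
						    int type, unsigned long len,
						    unsigned int trace_ctx)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	void *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						type, len, trace_ctx);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	/* ... fill @len bytes of event data, then commit (or discard) ... */
	(void)entry;
}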
2807 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2808 static DEFINE_MUTEX(tracepoint_printk_mutex);
2810 static void output_printk(struct trace_event_buffer *fbuffer)
2812 struct trace_event_call *event_call;
2813 struct trace_event_file *file;
2814 struct trace_event *event;
2815 unsigned long flags;
2816 struct trace_iterator *iter = tracepoint_print_iter;
2818 /* We should never get here if iter is NULL */
2819 if (WARN_ON_ONCE(!iter))
2822 event_call = fbuffer->trace_file->event_call;
2823 if (!event_call || !event_call->event.funcs ||
2824 !event_call->event.funcs->trace)
2827 file = fbuffer->trace_file;
2828 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2829 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2830 !filter_match_preds(file->filter, fbuffer->entry)))
2833 event = &fbuffer->trace_file->event_call->event;
2835 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2836 trace_seq_init(&iter->seq);
2837 iter->ent = fbuffer->entry;
2838 event_call->event.funcs->trace(iter, 0, event);
2839 trace_seq_putc(&iter->seq, 0);
2840 printk("%s", iter->seq.buffer);
2842 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2845 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2846 void *buffer, size_t *lenp,
2849 int save_tracepoint_printk;
2852 mutex_lock(&tracepoint_printk_mutex);
2853 save_tracepoint_printk = tracepoint_printk;
2855 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2858 * This will force exiting early, as tracepoint_printk
2859 * is always zero when tracepoint_print_iter is not allocated
2861 if (!tracepoint_print_iter)
2862 tracepoint_printk = 0;
2864 if (save_tracepoint_printk == tracepoint_printk)
2867 if (tracepoint_printk)
2868 static_key_enable(&tracepoint_printk_key.key);
2870 static_key_disable(&tracepoint_printk_key.key);
2873 mutex_unlock(&tracepoint_printk_mutex);
2878 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2880 if (static_key_false(&tracepoint_printk_key.key))
2881 output_printk(fbuffer);
2883 if (static_branch_unlikely(&trace_event_exports_enabled))
2884 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2885 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2886 fbuffer->event, fbuffer->entry,
2887 fbuffer->trace_ctx, fbuffer->regs);
2889 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2894 * trace_buffer_unlock_commit_regs()
2895 * trace_event_buffer_commit()
2896 * trace_event_raw_event_xxx()
2898 # define STACK_SKIP 3
2900 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2901 struct trace_buffer *buffer,
2902 struct ring_buffer_event *event,
2903 unsigned int trace_ctx,
2904 struct pt_regs *regs)
2906 __buffer_unlock_commit(buffer, event);
2909 * If regs is not set, then skip the necessary functions.
2910 * Note, we can still get here via blktrace, wakeup tracer
2911 * and mmiotrace, but that's ok if they lose a function or
2912 * two. They are not that meaningful.
2914 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2915 ftrace_trace_userstack(tr, buffer, trace_ctx);
2919 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2922 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2923 struct ring_buffer_event *event)
2925 __buffer_unlock_commit(buffer, event);
2929 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2930 parent_ip, unsigned int trace_ctx)
2932 struct trace_event_call *call = &event_function;
2933 struct trace_buffer *buffer = tr->array_buffer.buffer;
2934 struct ring_buffer_event *event;
2935 struct ftrace_entry *entry;
2937 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2941 entry = ring_buffer_event_data(event);
2943 entry->parent_ip = parent_ip;
2945 if (!call_filter_check_discard(call, entry, buffer, event)) {
2946 if (static_branch_unlikely(&trace_function_exports_enabled))
2947 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2948 __buffer_unlock_commit(buffer, event);
2952 #ifdef CONFIG_STACKTRACE
2954 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2955 #define FTRACE_KSTACK_NESTING 4
2957 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2959 struct ftrace_stack {
2960 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2964 struct ftrace_stacks {
2965 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2968 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2969 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2971 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2972 unsigned int trace_ctx,
2973 int skip, struct pt_regs *regs)
2975 struct trace_event_call *call = &event_kernel_stack;
2976 struct ring_buffer_event *event;
2977 unsigned int size, nr_entries;
2978 struct ftrace_stack *fstack;
2979 struct stack_entry *entry;
2983 * Add one, for this function and the call to save_stack_trace()
2984 * If regs is set, then these functions will not be in the way.
2986 #ifndef CONFIG_UNWINDER_ORC
2991 preempt_disable_notrace();
2993 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2995 /* This should never happen. If it does, yell once and skip */
2996 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3000 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3001 * interrupt will either see the value pre increment or post
3002 * increment. If the interrupt happens pre increment it will have
3003 * restored the counter when it returns. We just need a barrier to
3004 * keep gcc from moving things around.
3008 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3009 size = ARRAY_SIZE(fstack->calls);
3012 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3015 nr_entries = stack_trace_save(fstack->calls, size, skip);
3018 size = nr_entries * sizeof(unsigned long);
3019 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3020 (sizeof(*entry) - sizeof(entry->caller)) + size,
3024 entry = ring_buffer_event_data(event);
3026 memcpy(&entry->caller, fstack->calls, size);
3027 entry->size = nr_entries;
3029 if (!call_filter_check_discard(call, entry, buffer, event))
3030 __buffer_unlock_commit(buffer, event);
3033 /* Again, don't let gcc optimize things here */
3035 __this_cpu_dec(ftrace_stack_reserve);
3036 preempt_enable_notrace();
3040 static inline void ftrace_trace_stack(struct trace_array *tr,
3041 struct trace_buffer *buffer,
3042 unsigned int trace_ctx,
3043 int skip, struct pt_regs *regs)
3045 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3048 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3051 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3054 struct trace_buffer *buffer = tr->array_buffer.buffer;
3056 if (rcu_is_watching()) {
3057 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3062 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3063 * but if the above rcu_is_watching() failed, then the NMI
3064 * triggered someplace critical, and rcu_irq_enter() should
3065 * not be called from NMI.
3067 if (unlikely(in_nmi()))
3070 rcu_irq_enter_irqson();
3071 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3072 rcu_irq_exit_irqson();
3076 * trace_dump_stack - record a stack back trace in the trace buffer
3077 * @skip: Number of functions to skip (helper handlers)
3079 void trace_dump_stack(int skip)
3081 if (tracing_disabled || tracing_selftest_running)
3084 #ifndef CONFIG_UNWINDER_ORC
3085 /* Skip 1 to skip this function. */
3088 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3089 tracing_gen_ctx(), skip, NULL);
3091 EXPORT_SYMBOL_GPL(trace_dump_stack);
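/*
 * Illustrative use of trace_dump_stack() (hypothetical call site): it
 * records the current kernel stack into the global trace buffer, and
 * @skip hides that many innermost helper frames from the output.
 */
static void __maybe_unused example_record_backtrace(bool unexpected)
{
	if (unexpected)
		trace_dump_stack(0);	/* 0: include the immediate caller */
}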
3093 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3094 static DEFINE_PER_CPU(int, user_stack_count);
3097 ftrace_trace_userstack(struct trace_array *tr,
3098 struct trace_buffer *buffer, unsigned int trace_ctx)
3100 struct trace_event_call *call = &event_user_stack;
3101 struct ring_buffer_event *event;
3102 struct userstack_entry *entry;
3104 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3108 * NMIs cannot handle page faults, even with fixups.
3109 * Saving the user stack can (and often does) fault.
3111 if (unlikely(in_nmi()))
3115 * prevent recursion, since the user stack tracing may
3116 * trigger other kernel events.
3119 if (__this_cpu_read(user_stack_count))
3122 __this_cpu_inc(user_stack_count);
3124 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3125 sizeof(*entry), trace_ctx);
3127 goto out_drop_count;
3128 entry = ring_buffer_event_data(event);
3130 entry->tgid = current->tgid;
3131 memset(&entry->caller, 0, sizeof(entry->caller));
3133 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3134 if (!call_filter_check_discard(call, entry, buffer, event))
3135 __buffer_unlock_commit(buffer, event);
3138 __this_cpu_dec(user_stack_count);
3142 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3143 static void ftrace_trace_userstack(struct trace_array *tr,
3144 struct trace_buffer *buffer,
3145 unsigned int trace_ctx)
3148 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3150 #endif /* CONFIG_STACKTRACE */
3153 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3154 unsigned long long delta)
3156 entry->bottom_delta_ts = delta & U32_MAX;
3157 entry->top_delta_ts = (delta >> 32);
3160 void trace_last_func_repeats(struct trace_array *tr,
3161 struct trace_func_repeats *last_info,
3162 unsigned int trace_ctx)
3164 struct trace_buffer *buffer = tr->array_buffer.buffer;
3165 struct func_repeats_entry *entry;
3166 struct ring_buffer_event *event;
3169 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3170 sizeof(*entry), trace_ctx);
3174 delta = ring_buffer_event_time_stamp(buffer, event) -
3175 last_info->ts_last_call;
3177 entry = ring_buffer_event_data(event);
3178 entry->ip = last_info->ip;
3179 entry->parent_ip = last_info->parent_ip;
3180 entry->count = last_info->count;
3181 func_repeats_set_delta_ts(entry, delta);
3183 __buffer_unlock_commit(buffer, event);
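/*
 * Illustrative sketch (hypothetical reader, mirroring the split done by
 * func_repeats_set_delta_ts() above): the 64-bit delta is carried as two
 * 32-bit halves and reassembled when the entry is printed.
 */
static u64 __maybe_unused example_read_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}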
3186 /* created for use with alloc_percpu */
3187 struct trace_buffer_struct {
3189 char buffer[4][TRACE_BUF_SIZE];
3192 static struct trace_buffer_struct *trace_percpu_buffer;
3195 * This allows for lockless recording. If we're nested too deeply, then
3196 * this returns NULL.
3198 static char *get_trace_buf(void)
3200 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3202 if (!buffer || buffer->nesting >= 4)
3207 /* Interrupts must see nesting incremented before we use the buffer */
3209 return &buffer->buffer[buffer->nesting - 1][0];
3212 static void put_trace_buf(void)
3214 /* Don't let the decrement of nesting leak before this */
3216 this_cpu_dec(trace_percpu_buffer->nesting);
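/*
 * Illustrative sketch (hypothetical caller): get_trace_buf() and
 * put_trace_buf() must pair up, and the getter returns NULL once four
 * nesting levels on this CPU are already in use.
 */
static void __maybe_unused example_use_trace_buf(void)
{
	char *tbuffer = get_trace_buf();

	if (!tbuffer)
		return;

	/* ... format up to TRACE_BUF_SIZE bytes into tbuffer ... */

	put_trace_buf();
}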
3219 static int alloc_percpu_trace_buffer(void)
3221 struct trace_buffer_struct *buffers;
3223 if (trace_percpu_buffer)
3226 buffers = alloc_percpu(struct trace_buffer_struct);
3227 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3230 trace_percpu_buffer = buffers;
3234 static int buffers_allocated;
3236 void trace_printk_init_buffers(void)
3238 if (buffers_allocated)
3241 if (alloc_percpu_trace_buffer())
3244 /* trace_printk() is for debug use only. Don't use it in production. */
3247 pr_warn("**********************************************************\n");
3248 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3250 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3252 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3253 pr_warn("** unsafe for production use. **\n");
3255 pr_warn("** If you see this message and you are not debugging **\n");
3256 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3258 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3259 pr_warn("**********************************************************\n");
3261 /* Expand the buffers to set size */
3262 tracing_update_buffers();
3264 buffers_allocated = 1;
3267 * trace_printk_init_buffers() can be called by modules.
3268 * If that happens, then we need to start cmdline recording
3269 * directly here. If the global_trace.buffer is already
3270 * allocated here, then this was called by module code.
3272 if (global_trace.array_buffer.buffer)
3273 tracing_start_cmdline_record();
3275 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3277 void trace_printk_start_comm(void)
3279 /* Start tracing comms if trace printk is set */
3280 if (!buffers_allocated)
3282 tracing_start_cmdline_record();
3285 static void trace_printk_start_stop_comm(int enabled)
3287 if (!buffers_allocated)
3291 tracing_start_cmdline_record();
3293 tracing_stop_cmdline_record();
3297 * trace_vbprintk - write binary msg to tracing buffer
3298 * @ip: The address of the caller
3299 * @fmt: The string format to write to the buffer
3300 * @args: Arguments for @fmt
3302 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3304 struct trace_event_call *call = &event_bprint;
3305 struct ring_buffer_event *event;
3306 struct trace_buffer *buffer;
3307 struct trace_array *tr = &global_trace;
3308 struct bprint_entry *entry;
3309 unsigned int trace_ctx;
3313 if (unlikely(tracing_selftest_running || tracing_disabled))
3316 /* Don't pollute graph traces with trace_vprintk internals */
3317 pause_graph_tracing();
3319 trace_ctx = tracing_gen_ctx();
3320 preempt_disable_notrace();
3322 tbuffer = get_trace_buf();
3328 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3330 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3333 size = sizeof(*entry) + sizeof(u32) * len;
3334 buffer = tr->array_buffer.buffer;
3335 ring_buffer_nest_start(buffer);
3336 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3340 entry = ring_buffer_event_data(event);
3344 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3345 if (!call_filter_check_discard(call, entry, buffer, event)) {
3346 __buffer_unlock_commit(buffer, event);
3347 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3351 ring_buffer_nest_end(buffer);
3356 preempt_enable_notrace();
3357 unpause_graph_tracing();
3361 EXPORT_SYMBOL_GPL(trace_vbprintk);
3365 __trace_array_vprintk(struct trace_buffer *buffer,
3366 unsigned long ip, const char *fmt, va_list args)
3368 struct trace_event_call *call = &event_print;
3369 struct ring_buffer_event *event;
3371 struct print_entry *entry;
3372 unsigned int trace_ctx;
3375 if (tracing_disabled || tracing_selftest_running)
3378 /* Don't pollute graph traces with trace_vprintk internals */
3379 pause_graph_tracing();
3381 trace_ctx = tracing_gen_ctx();
3382 preempt_disable_notrace();
3385 tbuffer = get_trace_buf();
3391 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3393 size = sizeof(*entry) + len + 1;
3394 ring_buffer_nest_start(buffer);
3395 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3399 entry = ring_buffer_event_data(event);
3402 memcpy(&entry->buf, tbuffer, len + 1);
3403 if (!call_filter_check_discard(call, entry, buffer, event)) {
3404 __buffer_unlock_commit(buffer, event);
3405 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3409 ring_buffer_nest_end(buffer);
3413 preempt_enable_notrace();
3414 unpause_graph_tracing();
3420 int trace_array_vprintk(struct trace_array *tr,
3421 unsigned long ip, const char *fmt, va_list args)
3423 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3427 * trace_array_printk - Print a message to a specific instance
3428 * @tr: The instance trace_array descriptor
3429 * @ip: The instruction pointer that this is called from.
3430 * @fmt: The format to print (printf format)
3432 * If a subsystem sets up its own instance, it has the right to
3433 * printk strings into its tracing instance buffer using this
3434 * function. Note, this function will not write into the top level
3435 * buffer (use trace_printk() for that), as the top level
3436 * buffer should only contain events that can be individually disabled.
3437 * trace_printk() is only used for debugging a kernel, and should
3438 * never be incorporated into normal use.
3440 * trace_array_printk() can be used, as it will not add noise to the
3441 * top level tracing buffer.
3443 * Note, trace_array_init_printk() must be called on @tr before this
3447 int trace_array_printk(struct trace_array *tr,
3448 unsigned long ip, const char *fmt, ...)
3456 /* This is only allowed for created instances */
3457 if (tr == &global_trace)
3460 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3464 ret = trace_array_vprintk(tr, ip, fmt, ap);
3468 EXPORT_SYMBOL_GPL(trace_array_printk);
3471 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3472 * @tr: The trace array to initialize the buffers for
3474 * As trace_array_printk() only writes into instances, they are OK to
3475 * have in the kernel (unlike trace_printk()). This needs to be called
3476 * before trace_array_printk() can be used on a trace_array.
3478 int trace_array_init_printk(struct trace_array *tr)
3483 /* This is only allowed for created instances */
3484 if (tr == &global_trace)
3487 return alloc_percpu_trace_buffer();
3489 EXPORT_SYMBOL_GPL(trace_array_init_printk);
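/*
 * Illustrative sketch of the pairing described above (hypothetical
 * instance name, error handling trimmed): initialize the percpu buffers
 * once, then print into the instance.
 */
static void __maybe_unused example_instance_printk(void)
{
	struct trace_array *tr = trace_array_get_by_name("my_subsys");

	if (tr && !trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "hello from my_subsys\n");
}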
3492 int trace_array_printk_buf(struct trace_buffer *buffer,
3493 unsigned long ip, const char *fmt, ...)
3498 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3502 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3508 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3510 return trace_array_vprintk(&global_trace, ip, fmt, args);
3512 EXPORT_SYMBOL_GPL(trace_vprintk);
3514 static void trace_iterator_increment(struct trace_iterator *iter)
3516 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3520 ring_buffer_iter_advance(buf_iter);
3523 static struct trace_entry *
3524 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3525 unsigned long *lost_events)
3527 struct ring_buffer_event *event;
3528 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3531 event = ring_buffer_iter_peek(buf_iter, ts);
3533 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3534 (unsigned long)-1 : 0;
3536 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3541 iter->ent_size = ring_buffer_event_length(event);
3542 return ring_buffer_event_data(event);
3548 static struct trace_entry *
3549 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3550 unsigned long *missing_events, u64 *ent_ts)
3552 struct trace_buffer *buffer = iter->array_buffer->buffer;
3553 struct trace_entry *ent, *next = NULL;
3554 unsigned long lost_events = 0, next_lost = 0;
3555 int cpu_file = iter->cpu_file;
3556 u64 next_ts = 0, ts;
3562 * If we are in a per_cpu trace file, don't bother iterating over
3563 * all CPUs; just peek at that one directly.
3565 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3566 if (ring_buffer_empty_cpu(buffer, cpu_file))
3568 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3570 *ent_cpu = cpu_file;
3575 for_each_tracing_cpu(cpu) {
3577 if (ring_buffer_empty_cpu(buffer, cpu))
3580 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3583 * Pick the entry with the smallest timestamp:
3585 if (ent && (!next || ts < next_ts)) {
3589 next_lost = lost_events;
3590 next_size = iter->ent_size;
3594 iter->ent_size = next_size;
3597 *ent_cpu = next_cpu;
3603 *missing_events = next_lost;
3608 #define STATIC_FMT_BUF_SIZE 128
3609 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3611 static char *trace_iter_expand_format(struct trace_iterator *iter)
3616 * iter->tr is NULL when used with tp_printk, which makes
3617 * this get called where it is not safe to call krealloc().
3619 if (!iter->tr || iter->fmt == static_fmt_buf)
3622 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3625 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3632 /* Returns true if the string is safe to dereference from an event */
3633 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3635 unsigned long addr = (unsigned long)str;
3636 struct trace_event *trace_event;
3637 struct trace_event_call *event;
3639 /* OK if part of the event data */
3640 if ((addr >= (unsigned long)iter->ent) &&
3641 (addr < (unsigned long)iter->ent + iter->ent_size))
3644 /* OK if part of the temp seq buffer */
3645 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3646 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3649 /* Core rodata can not be freed */
3650 if (is_kernel_rodata(addr))
3653 if (trace_is_tracepoint_string(str))
3657 * Now this could be a module event, referencing core module
3658 * data, which is OK.
3663 trace_event = ftrace_find_event(iter->ent->type);
3667 event = container_of(trace_event, struct trace_event_call, event);
3671 /* Would rather have rodata, but this will suffice */
3672 if (within_module_core(addr, event->mod))
3678 static const char *show_buffer(struct trace_seq *s)
3680 struct seq_buf *seq = &s->seq;
3682 seq_buf_terminate(seq);
3687 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3689 static int test_can_verify_check(const char *fmt, ...)
3696 * The verifier depends on vsnprintf() modifying the va_list
3697 * passed to it, where it is sent as a reference. Some architectures
3698 * (like x86_32) pass it by value, which means that vsnprintf()
3699 * does not modify the va_list passed to it, and the verifier
3700 * would then need to be able to understand all the values that
3701 * vsnprintf can use. If it is passed by value, then the verifier
3705 vsnprintf(buf, 16, "%d", ap);
3706 ret = va_arg(ap, int);
3712 static void test_can_verify(void)
3714 if (!test_can_verify_check("%d %d", 0, 1)) {
3715 pr_info("trace event string verifier disabled\n");
3716 static_branch_inc(&trace_no_verify);
3721 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3722 * @iter: The iterator that holds the seq buffer and the event being printed
3723 * @fmt: The format used to print the event
3724 * @ap: The va_list holding the data to print from @fmt.
3726 * This writes the data into the @iter->seq buffer using the data from
3727 * @fmt and @ap. If the format has a %s, then the source of the string
3728 * is examined to make sure it is safe to print, otherwise it will
3729 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3732 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3735 const char *p = fmt;
3739 if (WARN_ON_ONCE(!fmt))
3742 if (static_branch_unlikely(&trace_no_verify))
3745 /* Don't bother checking when doing a ftrace_dump() */
3746 if (iter->fmt == static_fmt_buf)
3755 /* We only care about %s and variants */
3756 for (i = 0; p[i]; i++) {
3757 if (i + 1 >= iter->fmt_size) {
3759 * If we can't expand the copy buffer,
3762 if (!trace_iter_expand_format(iter))
3766 if (p[i] == '\\' && p[i+1]) {
3771 /* Need to test cases like %08.*s */
3772 for (j = 1; p[i+j]; j++) {
3773 if (isdigit(p[i+j]) ||
3776 if (p[i+j] == '*') {
3788 /* If no %s found then just print normally */
3792 /* Copy up to the %s, and print that */
3793 strncpy(iter->fmt, p, i);
3794 iter->fmt[i] = '\0';
3795 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3798 len = va_arg(ap, int);
3800 /* The ap now points to the string data of the %s */
3801 str = va_arg(ap, const char *);
3804 * If you hit this warning, it is likely that the
3805 * trace event in question used %s on a string that
3806 * was saved at the time of the event, but may not be
3807 * around when the trace is read. Use __string(),
3808 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3809 * instead. See samples/trace_events/trace-events-sample.h
3812 if (WARN_ONCE(!trace_safe_str(iter, str),
3813 "fmt: '%s' current_buffer: '%s'",
3814 fmt, show_buffer(&iter->seq))) {
3817 /* Try to safely read the string */
3819 if (len + 1 > iter->fmt_size)
3820 len = iter->fmt_size - 1;
3823 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3827 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3831 trace_seq_printf(&iter->seq, "(0x%px)", str);
3833 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3835 str = "[UNSAFE-MEMORY]";
3836 strcpy(iter->fmt, "%s");
3838 strncpy(iter->fmt, p + i, j + 1);
3839 iter->fmt[j+1] = '\0';
3842 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3844 trace_seq_printf(&iter->seq, iter->fmt, str);
3850 trace_seq_vprintf(&iter->seq, p, ap);
3853 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3855 const char *p, *new_fmt;
3858 if (WARN_ON_ONCE(!fmt))
3861 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3865 new_fmt = q = iter->fmt;
3867 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3868 if (!trace_iter_expand_format(iter))
3871 q += iter->fmt - new_fmt;
3872 new_fmt = iter->fmt;
3877 /* Replace %p with %px */
3881 } else if (p[0] == 'p' && !isalnum(p[1])) {
3892 #define STATIC_TEMP_BUF_SIZE 128
3893 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3895 /* Find the next real entry, without updating the iterator itself */
3896 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3897 int *ent_cpu, u64 *ent_ts)
3899 /* __find_next_entry will reset ent_size */
3900 int ent_size = iter->ent_size;
3901 struct trace_entry *entry;
3904 * If called from ftrace_dump(), then the iter->temp buffer
3905 * will be the static_temp_buf and not created from kmalloc.
3906 * If the entry size is greater than the buffer, we can
3907 * not save it. Just return NULL in that case. This is only
3908 * used to add markers when two consecutive events' time
3909 * stamps have a large delta. See trace_print_lat_context()
3911 if (iter->temp == static_temp_buf &&
3912 STATIC_TEMP_BUF_SIZE < ent_size)
3916 * The __find_next_entry() may call peek_next_entry(), which may
3917 * call ring_buffer_peek() that may make the contents of iter->ent
3918 * undefined. Need to copy iter->ent now.
3920 if (iter->ent && iter->ent != iter->temp) {
3921 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3922 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3924 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3929 iter->temp_size = iter->ent_size;
3931 memcpy(iter->temp, iter->ent, iter->ent_size);
3932 iter->ent = iter->temp;
3934 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3935 /* Put back the original ent_size */
3936 iter->ent_size = ent_size;
3941 /* Find the next real entry, and increment the iterator to the next entry */
3942 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3944 iter->ent = __find_next_entry(iter, &iter->cpu,
3945 &iter->lost_events, &iter->ts);
3948 trace_iterator_increment(iter);
3950 return iter->ent ? iter : NULL;
3953 static void trace_consume(struct trace_iterator *iter)
3955 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3956 &iter->lost_events);
3959 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3961 struct trace_iterator *iter = m->private;
3965 WARN_ON_ONCE(iter->leftover);
3969 /* can't go backwards */
3974 ent = trace_find_next_entry_inc(iter);
3978 while (ent && iter->idx < i)
3979 ent = trace_find_next_entry_inc(iter);
3986 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3988 struct ring_buffer_iter *buf_iter;
3989 unsigned long entries = 0;
3992 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3994 buf_iter = trace_buffer_iter(iter, cpu);
3998 ring_buffer_iter_reset(buf_iter);
4001 * We could have the case with the max latency tracers
4002 * that a reset never took place on a cpu. This is evidenced
4003 * by the timestamp being before the start of the buffer.
4005 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4006 if (ts >= iter->array_buffer->time_start)
4009 ring_buffer_iter_advance(buf_iter);
4012 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4016 * The current tracer is copied to avoid a global locking
4019 static void *s_start(struct seq_file *m, loff_t *pos)
4021 struct trace_iterator *iter = m->private;
4022 struct trace_array *tr = iter->tr;
4023 int cpu_file = iter->cpu_file;
4029 * copy the tracer to avoid using a global lock all around.
4030 * iter->trace is a copy of current_trace, the pointer to the
4031 * name may be used instead of a strcmp(), as iter->trace->name
4032 * will point to the same string as current_trace->name.
4034 mutex_lock(&trace_types_lock);
4035 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4036 *iter->trace = *tr->current_trace;
4037 mutex_unlock(&trace_types_lock);
4039 #ifdef CONFIG_TRACER_MAX_TRACE
4040 if (iter->snapshot && iter->trace->use_max_tr)
4041 return ERR_PTR(-EBUSY);
4044 if (!iter->snapshot)
4045 atomic_inc(&trace_record_taskinfo_disabled);
4047 if (*pos != iter->pos) {
4052 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4053 for_each_tracing_cpu(cpu)
4054 tracing_iter_reset(iter, cpu);
4056 tracing_iter_reset(iter, cpu_file);
4059 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4064 * If we overflowed the seq_file before, then we want
4065 * to just reuse the trace_seq buffer again.
4071 p = s_next(m, p, &l);
4075 trace_event_read_lock();
4076 trace_access_lock(cpu_file);
4080 static void s_stop(struct seq_file *m, void *p)
4082 struct trace_iterator *iter = m->private;
4084 #ifdef CONFIG_TRACER_MAX_TRACE
4085 if (iter->snapshot && iter->trace->use_max_tr)
4089 if (!iter->snapshot)
4090 atomic_dec(&trace_record_taskinfo_disabled);
4092 trace_access_unlock(iter->cpu_file);
4093 trace_event_read_unlock();
4097 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4098 unsigned long *entries, int cpu)
4100 unsigned long count;
4102 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4104 * If this buffer has skipped entries, then we hold all
4105 * entries for the trace and we need to ignore the
4106 * ones before the time stamp.
4108 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4109 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4110 /* total is the same as the entries */
4114 ring_buffer_overrun_cpu(buf->buffer, cpu);
4119 get_total_entries(struct array_buffer *buf,
4120 unsigned long *total, unsigned long *entries)
4128 for_each_tracing_cpu(cpu) {
4129 get_total_entries_cpu(buf, &t, &e, cpu);
4135 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4137 unsigned long total, entries;
4142 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4147 unsigned long trace_total_entries(struct trace_array *tr)
4149 unsigned long total, entries;
4154 get_total_entries(&tr->array_buffer, &total, &entries);
4159 static void print_lat_help_header(struct seq_file *m)
4161 seq_puts(m, "# _------=> CPU# \n"
4162 "# / _-----=> irqs-off \n"
4163 "# | / _----=> need-resched \n"
4164 "# || / _---=> hardirq/softirq \n"
4165 "# ||| / _--=> preempt-depth \n"
4167 "# cmd pid ||||| time | caller \n"
4168 "# \\ / ||||| \\ | / \n");
4171 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4173 unsigned long total;
4174 unsigned long entries;
4176 get_total_entries(buf, &total, &entries);
4177 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4178 entries, total, num_online_cpus());
4182 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4185 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4187 print_event_info(buf, m);
4189 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4190 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4193 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4196 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4197 const char *space = " ";
4198 int prec = tgid ? 12 : 2;
4200 print_event_info(buf, m);
4202 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4203 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4204 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4205 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4206 seq_printf(m, "# %.*s||| / delay\n", prec, space);
4207 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4208 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
4212 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4214 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4215 struct array_buffer *buf = iter->array_buffer;
4216 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4217 struct tracer *type = iter->trace;
4218 unsigned long entries;
4219 unsigned long total;
4220 const char *name = "preemption";
4224 get_total_entries(buf, &total, &entries);
4226 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4228 seq_puts(m, "# -----------------------------------"
4229 "---------------------------------\n");
4230 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4231 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4232 nsecs_to_usecs(data->saved_latency),
4236 #if defined(CONFIG_PREEMPT_NONE)
4238 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4240 #elif defined(CONFIG_PREEMPT)
4242 #elif defined(CONFIG_PREEMPT_RT)
4247 /* These are reserved for later use */
4250 seq_printf(m, " #P:%d)\n", num_online_cpus());
4254 seq_puts(m, "# -----------------\n");
4255 seq_printf(m, "# | task: %.16s-%d "
4256 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4257 data->comm, data->pid,
4258 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4259 data->policy, data->rt_priority);
4260 seq_puts(m, "# -----------------\n");
4262 if (data->critical_start) {
4263 seq_puts(m, "# => started at: ");
4264 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4265 trace_print_seq(m, &iter->seq);
4266 seq_puts(m, "\n# => ended at: ");
4267 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4268 trace_print_seq(m, &iter->seq);
4269 seq_puts(m, "\n#\n");
4275 static void test_cpu_buff_start(struct trace_iterator *iter)
4277 struct trace_seq *s = &iter->seq;
4278 struct trace_array *tr = iter->tr;
4280 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4283 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4286 if (cpumask_available(iter->started) &&
4287 cpumask_test_cpu(iter->cpu, iter->started))
4290 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4293 if (cpumask_available(iter->started))
4294 cpumask_set_cpu(iter->cpu, iter->started);
4296 /* Don't print started cpu buffer for the first entry of the trace */
4298 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4302 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4304 struct trace_array *tr = iter->tr;
4305 struct trace_seq *s = &iter->seq;
4306 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4307 struct trace_entry *entry;
4308 struct trace_event *event;
4312 test_cpu_buff_start(iter);
4314 event = ftrace_find_event(entry->type);
4316 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4317 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4318 trace_print_lat_context(iter);
4320 trace_print_context(iter);
4323 if (trace_seq_has_overflowed(s))
4324 return TRACE_TYPE_PARTIAL_LINE;
4327 return event->funcs->trace(iter, sym_flags, event);
4329 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4331 return trace_handle_return(s);
4334 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4336 struct trace_array *tr = iter->tr;
4337 struct trace_seq *s = &iter->seq;
4338 struct trace_entry *entry;
4339 struct trace_event *event;
4343 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4344 trace_seq_printf(s, "%d %d %llu ",
4345 entry->pid, iter->cpu, iter->ts);
4347 if (trace_seq_has_overflowed(s))
4348 return TRACE_TYPE_PARTIAL_LINE;
4350 event = ftrace_find_event(entry->type);
4352 return event->funcs->raw(iter, 0, event);
4354 trace_seq_printf(s, "%d ?\n", entry->type);
4356 return trace_handle_return(s);
4359 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4361 struct trace_array *tr = iter->tr;
4362 struct trace_seq *s = &iter->seq;
4363 unsigned char newline = '\n';
4364 struct trace_entry *entry;
4365 struct trace_event *event;
4369 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4370 SEQ_PUT_HEX_FIELD(s, entry->pid);
4371 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4372 SEQ_PUT_HEX_FIELD(s, iter->ts);
4373 if (trace_seq_has_overflowed(s))
4374 return TRACE_TYPE_PARTIAL_LINE;
4377 event = ftrace_find_event(entry->type);
4379 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4380 if (ret != TRACE_TYPE_HANDLED)
4384 SEQ_PUT_FIELD(s, newline);
4386 return trace_handle_return(s);
4389 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4391 struct trace_array *tr = iter->tr;
4392 struct trace_seq *s = &iter->seq;
4393 struct trace_entry *entry;
4394 struct trace_event *event;
4398 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4399 SEQ_PUT_FIELD(s, entry->pid);
4400 SEQ_PUT_FIELD(s, iter->cpu);
4401 SEQ_PUT_FIELD(s, iter->ts);
4402 if (trace_seq_has_overflowed(s))
4403 return TRACE_TYPE_PARTIAL_LINE;
4406 event = ftrace_find_event(entry->type);
4407 return event ? event->funcs->binary(iter, 0, event) :
4411 int trace_empty(struct trace_iterator *iter)
4413 struct ring_buffer_iter *buf_iter;
4416 /* If we are looking at one CPU buffer, only check that one */
4417 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4418 cpu = iter->cpu_file;
4419 buf_iter = trace_buffer_iter(iter, cpu);
4421 if (!ring_buffer_iter_empty(buf_iter))
4424 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4430 for_each_tracing_cpu(cpu) {
4431 buf_iter = trace_buffer_iter(iter, cpu);
4433 if (!ring_buffer_iter_empty(buf_iter))
4436 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4444 /* Called with trace_event_read_lock() held. */
4445 enum print_line_t print_trace_line(struct trace_iterator *iter)
4447 struct trace_array *tr = iter->tr;
4448 unsigned long trace_flags = tr->trace_flags;
4449 enum print_line_t ret;
4451 if (iter->lost_events) {
4452 if (iter->lost_events == (unsigned long)-1)
4453 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4456 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4457 iter->cpu, iter->lost_events);
4458 if (trace_seq_has_overflowed(&iter->seq))
4459 return TRACE_TYPE_PARTIAL_LINE;
4462 if (iter->trace && iter->trace->print_line) {
4463 ret = iter->trace->print_line(iter);
4464 if (ret != TRACE_TYPE_UNHANDLED)
4468 if (iter->ent->type == TRACE_BPUTS &&
4469 trace_flags & TRACE_ITER_PRINTK &&
4470 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4471 return trace_print_bputs_msg_only(iter);
4473 if (iter->ent->type == TRACE_BPRINT &&
4474 trace_flags & TRACE_ITER_PRINTK &&
4475 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4476 return trace_print_bprintk_msg_only(iter);
4478 if (iter->ent->type == TRACE_PRINT &&
4479 trace_flags & TRACE_ITER_PRINTK &&
4480 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4481 return trace_print_printk_msg_only(iter);
4483 if (trace_flags & TRACE_ITER_BIN)
4484 return print_bin_fmt(iter);
4486 if (trace_flags & TRACE_ITER_HEX)
4487 return print_hex_fmt(iter);
4489 if (trace_flags & TRACE_ITER_RAW)
4490 return print_raw_fmt(iter);
4492 return print_trace_fmt(iter);
4495 void trace_latency_header(struct seq_file *m)
4497 struct trace_iterator *iter = m->private;
4498 struct trace_array *tr = iter->tr;
4500 /* print nothing if the buffers are empty */
4501 if (trace_empty(iter))
4504 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4505 print_trace_header(m, iter);
4507 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4508 print_lat_help_header(m);
4511 void trace_default_header(struct seq_file *m)
4513 struct trace_iterator *iter = m->private;
4514 struct trace_array *tr = iter->tr;
4515 unsigned long trace_flags = tr->trace_flags;
4517 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4520 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4521 /* print nothing if the buffers are empty */
4522 if (trace_empty(iter))
4524 print_trace_header(m, iter);
4525 if (!(trace_flags & TRACE_ITER_VERBOSE))
4526 print_lat_help_header(m);
4528 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4529 if (trace_flags & TRACE_ITER_IRQ_INFO)
4530 print_func_help_header_irq(iter->array_buffer,
4533 print_func_help_header(iter->array_buffer, m,
4539 static void test_ftrace_alive(struct seq_file *m)
4541 if (!ftrace_is_dead())
4543 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4544 "# MAY BE MISSING FUNCTION EVENTS\n");
4547 #ifdef CONFIG_TRACER_MAX_TRACE
4548 static void show_snapshot_main_help(struct seq_file *m)
4550 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4551 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4552 "# Takes a snapshot of the main buffer.\n"
4553 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4554 "# (Doesn't have to be '2' works with any number that\n"
4555 "# is not a '0' or '1')\n");
4558 static void show_snapshot_percpu_help(struct seq_file *m)
4560 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4561 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4562 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4563 "# Takes a snapshot of the main buffer for this cpu.\n");
4565 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4566 "# Must use main snapshot file to allocate.\n");
4568 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4569 "# (Doesn't have to be '2' works with any number that\n"
4570 "# is not a '0' or '1')\n");
4573 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4575 if (iter->tr->allocated_snapshot)
4576 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4578 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4580 seq_puts(m, "# Snapshot commands:\n");
4581 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4582 show_snapshot_main_help(m);
4584 show_snapshot_percpu_help(m);
4587 /* Should never be called */
4588 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4591 static int s_show(struct seq_file *m, void *v)
4593 struct trace_iterator *iter = v;
4596 if (iter->ent == NULL) {
4598 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4600 test_ftrace_alive(m);
4602 if (iter->snapshot && trace_empty(iter))
4603 print_snapshot_help(m, iter);
4604 else if (iter->trace && iter->trace->print_header)
4605 iter->trace->print_header(m);
4607 trace_default_header(m);
4609 } else if (iter->leftover) {
4611 * If we filled the seq_file buffer earlier, we
4612 * want to just show it now.
4614 ret = trace_print_seq(m, &iter->seq);
4616 /* ret should this time be zero, but you never know */
4617 iter->leftover = ret;
4620 print_trace_line(iter);
4621 ret = trace_print_seq(m, &iter->seq);
4623 * If we overflow the seq_file buffer, then it will
4624 * ask us for this data again at start up.
4626 * ret is 0 if seq_file write succeeded.
4629 iter->leftover = ret;
4636 * Should be used after trace_array_get(), trace_types_lock
4637 * ensures that i_cdev was already initialized.
4639 static inline int tracing_get_cpu(struct inode *inode)
4641 if (inode->i_cdev) /* See trace_create_cpu_file() */
4642 return (long)inode->i_cdev - 1;
4643 return RING_BUFFER_ALL_CPUS;
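/*
 * Illustrative sketch (mirroring the decode above; the encode side lives
 * in trace_create_cpu_file()): per-cpu files stash "cpu + 1" in i_cdev,
 * so a NULL i_cdev means "all CPUs".
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);	// at creation time
 *	cpu = (long)inode->i_cdev - 1;			// tracing_get_cpu()
 */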
4646 static const struct seq_operations tracer_seq_ops = {
4653 static struct trace_iterator *
4654 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4656 struct trace_array *tr = inode->i_private;
4657 struct trace_iterator *iter;
4660 if (tracing_disabled)
4661 return ERR_PTR(-ENODEV);
4663 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4665 return ERR_PTR(-ENOMEM);
4667 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4669 if (!iter->buffer_iter)
4673 * trace_find_next_entry() may need to save off iter->ent.
4674 * It will place it into the iter->temp buffer. As most
4675 * events are less than 128, allocate a buffer of that size.
4676 * If one is greater, then trace_find_next_entry() will
4677 * allocate a new buffer to adjust for the bigger iter->ent.
4678 * It's not critical if it fails to get allocated here.
4680 iter->temp = kmalloc(128, GFP_KERNEL);
4682 iter->temp_size = 128;
4685 * trace_event_printf() may need to modify the given format
4686 * string to replace %p with %px so that it shows the real address
4687 * instead of a hash value. However, that is only needed for event
4688 * tracing; other tracers may not need it. Defer the allocation
4689 * until it is needed.
4695 * We make a copy of the current tracer to avoid concurrent
4696 * changes on it while we are reading.
4698 mutex_lock(&trace_types_lock);
4699 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4703 *iter->trace = *tr->current_trace;
4705 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4710 #ifdef CONFIG_TRACER_MAX_TRACE
4711 /* Currently only the top directory has a snapshot */
4712 if (tr->current_trace->print_max || snapshot)
4713 iter->array_buffer = &tr->max_buffer;
4716 iter->array_buffer = &tr->array_buffer;
4717 iter->snapshot = snapshot;
4719 iter->cpu_file = tracing_get_cpu(inode);
4720 mutex_init(&iter->mutex);
4722 /* Notify the tracer early; before we stop tracing. */
4723 if (iter->trace->open)
4724 iter->trace->open(iter);
4726 /* Annotate start of buffers if we had overruns */
4727 if (ring_buffer_overruns(iter->array_buffer->buffer))
4728 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4730 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4731 if (trace_clocks[tr->clock_id].in_ns)
4732 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4735 * If pause-on-trace is enabled, then stop the trace while
4736 * dumping, unless this is the "snapshot" file
4738 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4739 tracing_stop_tr(tr);
4741 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4742 for_each_tracing_cpu(cpu) {
4743 iter->buffer_iter[cpu] =
4744 ring_buffer_read_prepare(iter->array_buffer->buffer,
4747 ring_buffer_read_prepare_sync();
4748 for_each_tracing_cpu(cpu) {
4749 ring_buffer_read_start(iter->buffer_iter[cpu]);
4750 tracing_iter_reset(iter, cpu);
4753 cpu = iter->cpu_file;
4754 iter->buffer_iter[cpu] =
4755 ring_buffer_read_prepare(iter->array_buffer->buffer,
4757 ring_buffer_read_prepare_sync();
4758 ring_buffer_read_start(iter->buffer_iter[cpu]);
4759 tracing_iter_reset(iter, cpu);
4762 mutex_unlock(&trace_types_lock);
4767 mutex_unlock(&trace_types_lock);
4770 kfree(iter->buffer_iter);
4772 seq_release_private(inode, file);
4773 return ERR_PTR(-ENOMEM);
4776 int tracing_open_generic(struct inode *inode, struct file *filp)
4780 ret = tracing_check_open_get_tr(NULL);
4784 filp->private_data = inode->i_private;
4788 bool tracing_is_disabled(void)
4790 return (tracing_disabled) ? true : false;
4794 * Open and update trace_array ref count.
4795 * Must have the current trace_array passed to it.
4797 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4799 struct trace_array *tr = inode->i_private;
4802 ret = tracing_check_open_get_tr(tr);
4806 filp->private_data = inode->i_private;
4811 static int tracing_release(struct inode *inode, struct file *file)
4813 struct trace_array *tr = inode->i_private;
4814 struct seq_file *m = file->private_data;
4815 struct trace_iterator *iter;
4818 if (!(file->f_mode & FMODE_READ)) {
4819 trace_array_put(tr);
4823 /* Writes do not use seq_file */
4825 mutex_lock(&trace_types_lock);
4827 for_each_tracing_cpu(cpu) {
4828 if (iter->buffer_iter[cpu])
4829 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4832 if (iter->trace && iter->trace->close)
4833 iter->trace->close(iter);
4835 if (!iter->snapshot && tr->stop_count)
4836 /* reenable tracing if it was previously enabled */
4837 tracing_start_tr(tr);
4839 __trace_array_put(tr);
4841 mutex_unlock(&trace_types_lock);
4843 mutex_destroy(&iter->mutex);
4844 free_cpumask_var(iter->started);
4848 kfree(iter->buffer_iter);
4849 seq_release_private(inode, file);
4854 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4856 struct trace_array *tr = inode->i_private;
4858 trace_array_put(tr);
4862 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4864 struct trace_array *tr = inode->i_private;
4866 trace_array_put(tr);
4868 return single_release(inode, file);
4871 static int tracing_open(struct inode *inode, struct file *file)
4873 struct trace_array *tr = inode->i_private;
4874 struct trace_iterator *iter;
4877 ret = tracing_check_open_get_tr(tr);
4881 /* If this file was open for write, then erase contents */
4882 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4883 int cpu = tracing_get_cpu(inode);
4884 struct array_buffer *trace_buf = &tr->array_buffer;
4886 #ifdef CONFIG_TRACER_MAX_TRACE
4887 if (tr->current_trace->print_max)
4888 trace_buf = &tr->max_buffer;
4891 if (cpu == RING_BUFFER_ALL_CPUS)
4892 tracing_reset_online_cpus(trace_buf);
4894 tracing_reset_cpu(trace_buf, cpu);
4897 if (file->f_mode & FMODE_READ) {
4898 iter = __tracing_open(inode, file, false);
4900 ret = PTR_ERR(iter);
4901 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4902 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4906 trace_array_put(tr);
4912 * Some tracers are not suitable for instance buffers.
4913 * A tracer is always available for the global array (toplevel)
4914 * or if it explicitly states that it is.
4917 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4919 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
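/*
 * Illustrative: a tracer opts in to instance buffers by setting
 * .allow_instances in its struct tracer, e.g. (sketch, hypothetical name):
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name			= "foo",
 *		.allow_instances	= true,
 *	};
 */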
4922 /* Find the next tracer that this trace array may use */
4923 static struct tracer *
4924 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4926 while (t && !trace_ok_for_array(t, tr))
4933 t_next(struct seq_file *m, void *v, loff_t *pos)
4935 struct trace_array *tr = m->private;
4936 struct tracer *t = v;
4941 t = get_tracer_for_array(tr, t->next);
4946 static void *t_start(struct seq_file *m, loff_t *pos)
4948 struct trace_array *tr = m->private;
4952 mutex_lock(&trace_types_lock);
4954 t = get_tracer_for_array(tr, trace_types);
4955 for (; t && l < *pos; t = t_next(m, t, &l))
4961 static void t_stop(struct seq_file *m, void *p)
4963 mutex_unlock(&trace_types_lock);
4966 static int t_show(struct seq_file *m, void *v)
4968 struct tracer *t = v;
4973 seq_puts(m, t->name);
4982 static const struct seq_operations show_traces_seq_ops = {
4989 static int show_traces_open(struct inode *inode, struct file *file)
4991 struct trace_array *tr = inode->i_private;
4995 ret = tracing_check_open_get_tr(tr);
4999 ret = seq_open(file, &show_traces_seq_ops);
5001 trace_array_put(tr);
5005 m = file->private_data;
5011 static int show_traces_release(struct inode *inode, struct file *file)
5013 struct trace_array *tr = inode->i_private;
5015 trace_array_put(tr);
5016 return seq_release(inode, file);
5020 tracing_write_stub(struct file *filp, const char __user *ubuf,
5021 size_t count, loff_t *ppos)
5026 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5030 if (file->f_mode & FMODE_READ)
5031 ret = seq_lseek(file, offset, whence);
5033 file->f_pos = ret = 0;
5038 static const struct file_operations tracing_fops = {
5039 .open = tracing_open,
5041 .write = tracing_write_stub,
5042 .llseek = tracing_lseek,
5043 .release = tracing_release,
5046 static const struct file_operations show_traces_fops = {
5047 .open = show_traces_open,
5049 .llseek = seq_lseek,
5050 .release = show_traces_release,
5054 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5055 size_t count, loff_t *ppos)
5057 struct trace_array *tr = file_inode(filp)->i_private;
5061 len = snprintf(NULL, 0, "%*pb\n",
5062 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5063 mask_str = kmalloc(len, GFP_KERNEL);
5067 len = snprintf(mask_str, len, "%*pb\n",
5068 cpumask_pr_args(tr->tracing_cpumask));
5073 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5081 int tracing_set_cpumask(struct trace_array *tr,
5082 cpumask_var_t tracing_cpumask_new)
5089 local_irq_disable();
5090 arch_spin_lock(&tr->max_lock);
5091 for_each_tracing_cpu(cpu) {
5093 * Increase/decrease the disabled counter if we are
5094 * about to flip a bit in the cpumask:
5096 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5097 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5098 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5099 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5101 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5102 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5103 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5104 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5107 arch_spin_unlock(&tr->max_lock);
5110 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5116 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5117 size_t count, loff_t *ppos)
5119 struct trace_array *tr = file_inode(filp)->i_private;
5120 cpumask_var_t tracing_cpumask_new;
5123 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5126 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5130 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5134 free_cpumask_var(tracing_cpumask_new);
5139 free_cpumask_var(tracing_cpumask_new);
5144 static const struct file_operations tracing_cpumask_fops = {
5145 .open = tracing_open_generic_tr,
5146 .read = tracing_cpumask_read,
5147 .write = tracing_cpumask_write,
5148 .release = tracing_release_generic_tr,
5149 .llseek = generic_file_llseek,
5152 static int tracing_trace_options_show(struct seq_file *m, void *v)
5154 struct tracer_opt *trace_opts;
5155 struct trace_array *tr = m->private;
5159 mutex_lock(&trace_types_lock);
5160 tracer_flags = tr->current_trace->flags->val;
5161 trace_opts = tr->current_trace->flags->opts;
5163 for (i = 0; trace_options[i]; i++) {
5164 if (tr->trace_flags & (1 << i))
5165 seq_printf(m, "%s\n", trace_options[i]);
5167 seq_printf(m, "no%s\n", trace_options[i]);
5170 for (i = 0; trace_opts[i].name; i++) {
5171 if (tracer_flags & trace_opts[i].bit)
5172 seq_printf(m, "%s\n", trace_opts[i].name);
5174 seq_printf(m, "no%s\n", trace_opts[i].name);
5176 mutex_unlock(&trace_types_lock);
5181 static int __set_tracer_option(struct trace_array *tr,
5182 struct tracer_flags *tracer_flags,
5183 struct tracer_opt *opts, int neg)
5185 struct tracer *trace = tracer_flags->trace;
5188 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5193 tracer_flags->val &= ~opts->bit;
5195 tracer_flags->val |= opts->bit;
5199 /* Try to assign a tracer specific option */
5200 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5202 struct tracer *trace = tr->current_trace;
5203 struct tracer_flags *tracer_flags = trace->flags;
5204 struct tracer_opt *opts = NULL;
5207 for (i = 0; tracer_flags->opts[i].name; i++) {
5208 opts = &tracer_flags->opts[i];
5210 if (strcmp(cmp, opts->name) == 0)
5211 return __set_tracer_option(tr, trace->flags, opts, neg);
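/*
 * Sketch of how trace_keep_overwrite() below is meant to be hooked up: a
 * tracer that depends on overwrite mode points its .flag_changed callback
 * at it, so the flag cannot be cleared while that tracer is enabled, e.g.:
 *
 *	.flag_changed	= trace_keep_overwrite,
 */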
5217 /* Some tracers require overwrite to stay enabled */
5218 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5220 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5226 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5228 if ((mask == TRACE_ITER_RECORD_TGID) ||
5229 (mask == TRACE_ITER_RECORD_CMD))
5230 lockdep_assert_held(&event_mutex);
5232 /* do nothing if flag is already set */
5233 if (!!(tr->trace_flags & mask) == !!enabled)
5236 /* Give the tracer a chance to approve the change */
5237 if (tr->current_trace->flag_changed)
5238 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5242 tr->trace_flags |= mask;
5244 tr->trace_flags &= ~mask;
5246 if (mask == TRACE_ITER_RECORD_CMD)
5247 trace_event_enable_cmd_record(enabled);
5249 if (mask == TRACE_ITER_RECORD_TGID) {
5251 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
5255 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5259 trace_event_enable_tgid_record(enabled);
5262 if (mask == TRACE_ITER_EVENT_FORK)
5263 trace_event_follow_fork(tr, enabled);
5265 if (mask == TRACE_ITER_FUNC_FORK)
5266 ftrace_pid_follow_fork(tr, enabled);
5268 if (mask == TRACE_ITER_OVERWRITE) {
5269 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5270 #ifdef CONFIG_TRACER_MAX_TRACE
5271 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5275 if (mask == TRACE_ITER_PRINTK) {
5276 trace_printk_start_stop_comm(enabled);
5277 trace_printk_control(enabled);
5283 int trace_set_options(struct trace_array *tr, char *option)
5288 size_t orig_len = strlen(option);
5291 cmp = strstrip(option);
5293 len = str_has_prefix(cmp, "no");
5299 mutex_lock(&event_mutex);
5300 mutex_lock(&trace_types_lock);
5302 ret = match_string(trace_options, -1, cmp);
5303 /* If no option could be set, test the specific tracer options */
5305 ret = set_tracer_option(tr, cmp, neg);
5307 ret = set_tracer_flag(tr, 1 << ret, !neg);
5309 mutex_unlock(&trace_types_lock);
5310 mutex_unlock(&event_mutex);
5313 * If the first trailing whitespace is replaced with '\0' by strstrip,
5314 * turn it back into a space.
5316 if (orig_len > strlen(option))
5317 option[strlen(option)] = ' ';
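/*
 * Illustrative walk-through: writing "noprint-parent" to trace_options
 * (or passing it via the trace_options= boot parameter handled below)
 * reaches trace_set_options() with option = "noprint-parent"; the "no"
 * prefix is stripped, match_string() locates "print-parent" in
 * trace_options[], and set_tracer_flag(tr, TRACE_ITER_PRINT_PARENT, 0)
 * clears the flag. An unknown name falls back to set_tracer_option() for
 * tracer-specific flags.
 */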
5322 static void __init apply_trace_boot_options(void)
5324 char *buf = trace_boot_options_buf;
5328 option = strsep(&buf, ",");
5334 trace_set_options(&global_trace, option);
5336 /* Put back the comma to allow this to be called again */
5343 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5344 size_t cnt, loff_t *ppos)
5346 struct seq_file *m = filp->private_data;
5347 struct trace_array *tr = m->private;
5351 if (cnt >= sizeof(buf))
5354 if (copy_from_user(buf, ubuf, cnt))
5359 ret = trace_set_options(tr, buf);
5368 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5370 struct trace_array *tr = inode->i_private;
5373 ret = tracing_check_open_get_tr(tr);
5377 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5379 trace_array_put(tr);
5384 static const struct file_operations tracing_iter_fops = {
5385 .open = tracing_trace_options_open,
5387 .llseek = seq_lseek,
5388 .release = tracing_single_release_tr,
5389 .write = tracing_trace_options_write,
5392 static const char readme_msg[] =
5393 "tracing mini-HOWTO:\n\n"
5394 "# echo 0 > tracing_on : quick way to disable tracing\n"
5395 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5396 " Important files:\n"
5397 " trace\t\t\t- The static contents of the buffer\n"
5398 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5399 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5400 " current_tracer\t- function and latency tracers\n"
5401 " available_tracers\t- list of configured tracers for current_tracer\n"
5402 " error_log\t- error log for failed commands (that support it)\n"
5403 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5404 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5405 " trace_clock\t\t-change the clock used to order events\n"
5406 " local: Per cpu clock but may not be synced across CPUs\n"
5407 " global: Synced across CPUs but slows tracing down.\n"
5408 " counter: Not a clock, but just an increment\n"
5409 " uptime: Jiffy counter from time of boot\n"
5410 " perf: Same clock that perf events use\n"
5411 #ifdef CONFIG_X86_64
5412 " x86-tsc: TSC cycle counter\n"
5414 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5415 " delta: Delta difference against a buffer-wide timestamp\n"
5416 " absolute: Absolute (standalone) timestamp\n"
5417 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5418 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5419 " tracing_cpumask\t- Limit which CPUs to trace\n"
5420 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5421 "\t\t\t Remove sub-buffer with rmdir\n"
5422 " trace_options\t\t- Set format or modify how tracing happens\n"
5423 "\t\t\t Disable an option by prefixing 'no' to the\n"
5424 "\t\t\t option name\n"
5425 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5426 #ifdef CONFIG_DYNAMIC_FTRACE
5427 "\n available_filter_functions - list of functions that can be filtered on\n"
5428 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5429 "\t\t\t functions\n"
5430 "\t accepts: func_full_name or glob-matching-pattern\n"
5431 "\t modules: Can select a group via module\n"
5432 "\t Format: :mod:<module-name>\n"
5433 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5434 "\t triggers: a command to perform when function is hit\n"
5435 "\t Format: <function>:<trigger>[:count]\n"
5436 "\t trigger: traceon, traceoff\n"
5437 "\t\t enable_event:<system>:<event>\n"
5438 "\t\t disable_event:<system>:<event>\n"
5439 #ifdef CONFIG_STACKTRACE
5442 #ifdef CONFIG_TRACER_SNAPSHOT
5447 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5448 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5449 "\t The first one will disable tracing every time do_fault is hit\n"
5450 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5451 "\t The first time do trap is hit and it disables tracing, the\n"
5452 "\t counter will decrement to 2. If tracing is already disabled,\n"
5453 "\t the counter will not decrement. It only decrements when the\n"
5454 "\t trigger did work\n"
5455 "\t To remove trigger without count:\n"
5456 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5457 "\t To remove trigger with a count:\n"
5458 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5459 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5460 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5461 "\t modules: Can select a group via module command :mod:\n"
5462 "\t Does not accept triggers\n"
5463 #endif /* CONFIG_DYNAMIC_FTRACE */
5464 #ifdef CONFIG_FUNCTION_TRACER
5465 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5467 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5471 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5472 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5473 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5475 #ifdef CONFIG_TRACER_SNAPSHOT
5476 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5477 "\t\t\t snapshot buffer. Read the contents for more\n"
5478 "\t\t\t information\n"
5480 #ifdef CONFIG_STACK_TRACER
5481 " stack_trace\t\t- Shows the max stack trace when active\n"
5482 " stack_max_size\t- Shows current max stack size that was traced\n"
5483 "\t\t\t Write into this file to reset the max size (trigger a\n"
5484 "\t\t\t new trace)\n"
5485 #ifdef CONFIG_DYNAMIC_FTRACE
5486 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5489 #endif /* CONFIG_STACK_TRACER */
5490 #ifdef CONFIG_DYNAMIC_EVENTS
5491 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5492 "\t\t\t Write into this file to define/undefine new trace events.\n"
5494 #ifdef CONFIG_KPROBE_EVENTS
5495 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5496 "\t\t\t Write into this file to define/undefine new trace events.\n"
5498 #ifdef CONFIG_UPROBE_EVENTS
5499 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5500 "\t\t\t Write into this file to define/undefine new trace events.\n"
5502 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5503 "\t accepts: event-definitions (one definition per line)\n"
5504 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5505 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5506 #ifdef CONFIG_HIST_TRIGGERS
5507 "\t s:[synthetic/]<event> <field> [<field>]\n"
5509 "\t -:[<group>/]<event>\n"
5510 #ifdef CONFIG_KPROBE_EVENTS
5511 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5512 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5514 #ifdef CONFIG_UPROBE_EVENTS
5515 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5517 "\t args: <name>=fetcharg[:type]\n"
5518 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5519 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5520 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5522 "\t $stack<index>, $stack, $retval, $comm,\n"
5524 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5525 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5526 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5527 "\t <type>\\[<array-size>\\]\n"
5528 #ifdef CONFIG_HIST_TRIGGERS
5529 "\t field: <stype> <name>;\n"
5530 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5531 "\t [unsigned] char/int/long\n"
5534 " events/\t\t- Directory containing all trace event subsystems:\n"
5535 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5536 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5537 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5539 " filter\t\t- If set, only events passing filter are traced\n"
5540 " events/<system>/<event>/\t- Directory containing control files for\n"
5542 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5543 " filter\t\t- If set, only events passing filter are traced\n"
5544 " trigger\t\t- If set, a command to perform when event is hit\n"
5545 "\t Format: <trigger>[:count][if <filter>]\n"
5546 "\t trigger: traceon, traceoff\n"
5547 "\t enable_event:<system>:<event>\n"
5548 "\t disable_event:<system>:<event>\n"
5549 #ifdef CONFIG_HIST_TRIGGERS
5550 "\t enable_hist:<system>:<event>\n"
5551 "\t disable_hist:<system>:<event>\n"
5553 #ifdef CONFIG_STACKTRACE
5556 #ifdef CONFIG_TRACER_SNAPSHOT
5559 #ifdef CONFIG_HIST_TRIGGERS
5560 "\t\t hist (see below)\n"
5562 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5563 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5564 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5565 "\t events/block/block_unplug/trigger\n"
5566 "\t The first disables tracing every time block_unplug is hit.\n"
5567 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5568 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5569 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5570 "\t Like function triggers, the counter is only decremented if it\n"
5571 "\t enabled or disabled tracing.\n"
5572 "\t To remove a trigger without a count:\n"
5573 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5574 "\t To remove a trigger with a count:\n"
5575 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5576 "\t Filters can be ignored when removing a trigger.\n"
5577 #ifdef CONFIG_HIST_TRIGGERS
5578 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5579 "\t Format: hist:keys=<field1[,field2,...]>\n"
5580 "\t [:values=<field1[,field2,...]>]\n"
5581 "\t [:sort=<field1[,field2,...]>]\n"
5582 "\t [:size=#entries]\n"
5583 "\t [:pause][:continue][:clear]\n"
5584 "\t [:name=histname1]\n"
5585 "\t [:<handler>.<action>]\n"
5586 "\t [if <filter>]\n\n"
5587 "\t When a matching event is hit, an entry is added to a hash\n"
5588 "\t table using the key(s) and value(s) named, and the value of a\n"
5589 "\t sum called 'hitcount' is incremented. Keys and values\n"
5590 "\t correspond to fields in the event's format description. Keys\n"
5591 "\t can be any field, or the special string 'stacktrace'.\n"
5592 "\t Compound keys consisting of up to two fields can be specified\n"
5593 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5594 "\t fields. Sort keys consisting of up to two fields can be\n"
5595 "\t specified using the 'sort' keyword. The sort direction can\n"
5596 "\t be modified by appending '.descending' or '.ascending' to a\n"
5597 "\t sort field. The 'size' parameter can be used to specify more\n"
5598 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5599 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5600 "\t its histogram data will be shared with other triggers of the\n"
5601 "\t same name, and trigger hits will update this common data.\n\n"
5602 "\t Reading the 'hist' file for the event will dump the hash\n"
5603 "\t table in its entirety to stdout. If there are multiple hist\n"
5604 "\t triggers attached to an event, there will be a table for each\n"
5605 "\t trigger in the output. The table displayed for a named\n"
5606 "\t trigger will be the same as any other instance having the\n"
5607 "\t same name. The default format used to display a given field\n"
5608 "\t can be modified by appending any of the following modifiers\n"
5609 "\t to the field name, as applicable:\n\n"
5610 "\t .hex display a number as a hex value\n"
5611 "\t .sym display an address as a symbol\n"
5612 "\t .sym-offset display an address as a symbol and offset\n"
5613 "\t .execname display a common_pid as a program name\n"
5614 "\t .syscall display a syscall id as a syscall name\n"
5615 "\t .log2 display log2 value rather than raw number\n"
5616 "\t .usecs display a common_timestamp in microseconds\n\n"
5617 "\t The 'pause' parameter can be used to pause an existing hist\n"
5618 "\t trigger or to start a hist trigger but not log any events\n"
5619 "\t until told to do so. 'continue' can be used to start or\n"
5620 "\t restart a paused hist trigger.\n\n"
5621 "\t The 'clear' parameter will clear the contents of a running\n"
5622 "\t hist trigger and leave its current paused/active state\n"
5624 "\t The enable_hist and disable_hist triggers can be used to\n"
5625 "\t have one event conditionally start and stop another event's\n"
5626 "\t already-attached hist trigger. The syntax is analogous to\n"
5627 "\t the enable_event and disable_event triggers.\n\n"
5628 "\t Hist trigger handlers and actions are executed whenever a\n"
5629 "\t a histogram entry is added or updated. They take the form:\n\n"
5630 "\t <handler>.<action>\n\n"
5631 "\t The available handlers are:\n\n"
5632 "\t onmatch(matching.event) - invoke on addition or update\n"
5633 "\t onmax(var) - invoke if var exceeds current max\n"
5634 "\t onchange(var) - invoke action if var changes\n\n"
5635 "\t The available actions are:\n\n"
5636 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5637 "\t save(field,...) - save current event fields\n"
5638 #ifdef CONFIG_TRACER_SNAPSHOT
5639 "\t snapshot() - snapshot the trace buffer\n\n"
5641 #ifdef CONFIG_SYNTH_EVENTS
5642 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5643 "\t Write into this file to define/undefine new synthetic events.\n"
5644 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5650 tracing_readme_read(struct file *filp, char __user *ubuf,
5651 size_t cnt, loff_t *ppos)
5653 return simple_read_from_buffer(ubuf, cnt, ppos,
5654 readme_msg, strlen(readme_msg));
5657 static const struct file_operations tracing_readme_fops = {
5658 .open = tracing_open_generic,
5659 .read = tracing_readme_read,
5660 .llseek = generic_file_llseek,
5663 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5667 if (*pos || m->count)
5672 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5673 if (trace_find_tgid(*ptr))
5680 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5690 v = saved_tgids_next(m, v, &l);
5698 static void saved_tgids_stop(struct seq_file *m, void *v)
5702 static int saved_tgids_show(struct seq_file *m, void *v)
5704 int pid = (int *)v - tgid_map;
5706 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5710 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5711 .start = saved_tgids_start,
5712 .stop = saved_tgids_stop,
5713 .next = saved_tgids_next,
5714 .show = saved_tgids_show,
5717 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5721 ret = tracing_check_open_get_tr(NULL);
5725 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5729 static const struct file_operations tracing_saved_tgids_fops = {
5730 .open = tracing_saved_tgids_open,
5732 .llseek = seq_lseek,
5733 .release = seq_release,
5736 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5738 unsigned int *ptr = v;
5740 if (*pos || m->count)
5745 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5747 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5756 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5762 arch_spin_lock(&trace_cmdline_lock);
5764 v = &savedcmd->map_cmdline_to_pid[0];
5766 v = saved_cmdlines_next(m, v, &l);
5774 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5776 arch_spin_unlock(&trace_cmdline_lock);
5780 static int saved_cmdlines_show(struct seq_file *m, void *v)
5782 char buf[TASK_COMM_LEN];
5783 unsigned int *pid = v;
5785 __trace_find_cmdline(*pid, buf);
5786 seq_printf(m, "%d %s\n", *pid, buf);
5790 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5791 .start = saved_cmdlines_start,
5792 .next = saved_cmdlines_next,
5793 .stop = saved_cmdlines_stop,
5794 .show = saved_cmdlines_show,
5797 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5801 ret = tracing_check_open_get_tr(NULL);
5805 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5808 static const struct file_operations tracing_saved_cmdlines_fops = {
5809 .open = tracing_saved_cmdlines_open,
5811 .llseek = seq_lseek,
5812 .release = seq_release,
5816 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5817 size_t cnt, loff_t *ppos)
5822 arch_spin_lock(&trace_cmdline_lock);
5823 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5824 arch_spin_unlock(&trace_cmdline_lock);
5826 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5829 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5831 kfree(s->saved_cmdlines);
5832 kfree(s->map_cmdline_to_pid);
5836 static int tracing_resize_saved_cmdlines(unsigned int val)
5838 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5840 s = kmalloc(sizeof(*s), GFP_KERNEL);
5844 if (allocate_cmdlines_buffer(val, s) < 0) {
5849 arch_spin_lock(&trace_cmdline_lock);
5850 savedcmd_temp = savedcmd;
5852 arch_spin_unlock(&trace_cmdline_lock);
5853 free_saved_cmdlines_buffer(savedcmd_temp);
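/*
 * Usage note (illustrative): the resize above is reached from userspace
 * via the saved_cmdlines_size file, e.g.
 *
 *	echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * which makes room for 4096 cached pid<->comm mappings (bounded below by 1
 * and above by PID_MAX_DEFAULT in the write handler).
 */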
5859 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5860 size_t cnt, loff_t *ppos)
5865 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5869 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
5870 if (!val || val > PID_MAX_DEFAULT)
5873 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5882 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5883 .open = tracing_open_generic,
5884 .read = tracing_saved_cmdlines_size_read,
5885 .write = tracing_saved_cmdlines_size_write,
5888 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5889 static union trace_eval_map_item *
5890 update_eval_map(union trace_eval_map_item *ptr)
5892 if (!ptr->map.eval_string) {
5893 if (ptr->tail.next) {
5894 ptr = ptr->tail.next;
5895 /* Set ptr to the next real item (skip head) */
5903 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5905 union trace_eval_map_item *ptr = v;
5908 * Paranoid! If ptr points to end, we don't want to increment past it.
5909 * This really should never happen.
5912 ptr = update_eval_map(ptr);
5913 if (WARN_ON_ONCE(!ptr))
5917 ptr = update_eval_map(ptr);
5922 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5924 union trace_eval_map_item *v;
5927 mutex_lock(&trace_eval_mutex);
5929 v = trace_eval_maps;
5933 while (v && l < *pos) {
5934 v = eval_map_next(m, v, &l);
5940 static void eval_map_stop(struct seq_file *m, void *v)
5942 mutex_unlock(&trace_eval_mutex);
5945 static int eval_map_show(struct seq_file *m, void *v)
5947 union trace_eval_map_item *ptr = v;
5949 seq_printf(m, "%s %ld (%s)\n",
5950 ptr->map.eval_string, ptr->map.eval_value,
5956 static const struct seq_operations tracing_eval_map_seq_ops = {
5957 .start = eval_map_start,
5958 .next = eval_map_next,
5959 .stop = eval_map_stop,
5960 .show = eval_map_show,
5963 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5967 ret = tracing_check_open_get_tr(NULL);
5971 return seq_open(filp, &tracing_eval_map_seq_ops);
5974 static const struct file_operations tracing_eval_map_fops = {
5975 .open = tracing_eval_map_open,
5977 .llseek = seq_lseek,
5978 .release = seq_release,
5981 static inline union trace_eval_map_item *
5982 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5984 /* Return tail of array given the head */
5985 return ptr + ptr->head.length + 1;
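/*
 * Layout sketch of one trace_eval_map_item array as assumed by the helper
 * above and built by trace_insert_eval_map_file() below:
 *
 *	[0]            head  (module pointer + number of maps)
 *	[1 .. length]  the eval maps themselves
 *	[length + 1]   tail  (link to the next array, or NULL)
 *
 * so "ptr + length + 1" jumps from the head straight to the tail.
 */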
5989 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5992 struct trace_eval_map **stop;
5993 struct trace_eval_map **map;
5994 union trace_eval_map_item *map_array;
5995 union trace_eval_map_item *ptr;
6000 * The trace_eval_maps contains the map plus a head and tail item,
6001 * where the head holds the module and length of array, and the
6002 * tail holds a pointer to the next list.
6004 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6006 pr_warn("Unable to allocate trace eval mapping\n");
6010 mutex_lock(&trace_eval_mutex);
6012 if (!trace_eval_maps)
6013 trace_eval_maps = map_array;
6015 ptr = trace_eval_maps;
6017 ptr = trace_eval_jmp_to_tail(ptr);
6018 if (!ptr->tail.next)
6020 ptr = ptr->tail.next;
6023 ptr->tail.next = map_array;
6025 map_array->head.mod = mod;
6026 map_array->head.length = len;
6029 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6030 map_array->map = **map;
6033 memset(map_array, 0, sizeof(*map_array));
6035 mutex_unlock(&trace_eval_mutex);
6038 static void trace_create_eval_file(struct dentry *d_tracer)
6040 trace_create_file("eval_map", 0444, d_tracer,
6041 NULL, &tracing_eval_map_fops);
6044 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6045 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6046 static inline void trace_insert_eval_map_file(struct module *mod,
6047 struct trace_eval_map **start, int len) { }
6048 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6050 static void trace_insert_eval_map(struct module *mod,
6051 struct trace_eval_map **start, int len)
6053 struct trace_eval_map **map;
6060 trace_event_eval_update(map, len);
6062 trace_insert_eval_map_file(mod, start, len);
6066 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6067 size_t cnt, loff_t *ppos)
6069 struct trace_array *tr = filp->private_data;
6070 char buf[MAX_TRACER_SIZE+2];
6073 mutex_lock(&trace_types_lock);
6074 r = sprintf(buf, "%s\n", tr->current_trace->name);
6075 mutex_unlock(&trace_types_lock);
6077 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6080 int tracer_init(struct tracer *t, struct trace_array *tr)
6082 tracing_reset_online_cpus(&tr->array_buffer);
6086 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6090 for_each_tracing_cpu(cpu)
6091 per_cpu_ptr(buf->data, cpu)->entries = val;
6094 #ifdef CONFIG_TRACER_MAX_TRACE
6095 /* resize @trace_buf's entries to match @size_buf's entries */
6096 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6097 struct array_buffer *size_buf, int cpu_id)
6101 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6102 for_each_tracing_cpu(cpu) {
6103 ret = ring_buffer_resize(trace_buf->buffer,
6104 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6107 per_cpu_ptr(trace_buf->data, cpu)->entries =
6108 per_cpu_ptr(size_buf->data, cpu)->entries;
6111 ret = ring_buffer_resize(trace_buf->buffer,
6112 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6114 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6115 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6120 #endif /* CONFIG_TRACER_MAX_TRACE */
6122 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6123 unsigned long size, int cpu)
6128 * If kernel or user changes the size of the ring buffer
6129 * we use the size that was given, and we can forget about
6130 * expanding it later.
6132 ring_buffer_expanded = true;
6134 /* May be called before buffers are initialized */
6135 if (!tr->array_buffer.buffer)
6138 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6142 #ifdef CONFIG_TRACER_MAX_TRACE
6143 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6144 !tr->current_trace->use_max_tr)
6147 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6149 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6150 &tr->array_buffer, cpu);
6153 * AARGH! We are left with different
6154 * size max buffer!!!!
6155 * The max buffer is our "snapshot" buffer.
6156 * When a tracer needs a snapshot (one of the
6157 * latency tracers), it swaps the max buffer
6158 * with the saved snap shot. We succeeded to
6159 * update the size of the main buffer, but failed to
6160 * update the size of the max buffer. But when we tried
6161 * to reset the main buffer to the original size, we
6162 * failed there too. This is very unlikely to
6163 * happen, but if it does, warn and kill all
6167 tracing_disabled = 1;
6172 if (cpu == RING_BUFFER_ALL_CPUS)
6173 set_buffer_entries(&tr->max_buffer, size);
6175 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6178 #endif /* CONFIG_TRACER_MAX_TRACE */
6180 if (cpu == RING_BUFFER_ALL_CPUS)
6181 set_buffer_entries(&tr->array_buffer, size);
6183 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6188 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6189 unsigned long size, int cpu_id)
6193 mutex_lock(&trace_types_lock);
6195 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6196 /* make sure, this cpu is enabled in the mask */
6197 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6203 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6208 mutex_unlock(&trace_types_lock);
6215 * tracing_update_buffers - used by tracing facility to expand ring buffers
6217 * To save memory when tracing is never used on a system that has it
6218 * configured in, the ring buffers are set to a minimum size. Once
6219 * a user starts to use the tracing facility, they are grown
6220 * to their default size.
6222 * This function is to be called when a tracer is about to be used.
6224 int tracing_update_buffers(void)
6228 mutex_lock(&trace_types_lock);
6229 if (!ring_buffer_expanded)
6230 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6231 RING_BUFFER_ALL_CPUS);
6232 mutex_unlock(&trace_types_lock);
6237 struct trace_option_dentry;
6240 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6243 * Used to clear out the tracer before deletion of an instance.
6244 * Must have trace_types_lock held.
6246 static void tracing_set_nop(struct trace_array *tr)
6248 if (tr->current_trace == &nop_trace)
6251 tr->current_trace->enabled--;
6253 if (tr->current_trace->reset)
6254 tr->current_trace->reset(tr);
6256 tr->current_trace = &nop_trace;
6259 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6261 /* Only enable if the directory has been created already. */
6265 create_trace_option_files(tr, t);
6268 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6271 #ifdef CONFIG_TRACER_MAX_TRACE
6276 mutex_lock(&trace_types_lock);
6278 if (!ring_buffer_expanded) {
6279 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6280 RING_BUFFER_ALL_CPUS);
6286 for (t = trace_types; t; t = t->next) {
6287 if (strcmp(t->name, buf) == 0)
6294 if (t == tr->current_trace)
6297 #ifdef CONFIG_TRACER_SNAPSHOT
6298 if (t->use_max_tr) {
6299 arch_spin_lock(&tr->max_lock);
6300 if (tr->cond_snapshot)
6302 arch_spin_unlock(&tr->max_lock);
6307 /* Some tracers won't work on kernel command line */
6308 if (system_state < SYSTEM_RUNNING && t->noboot) {
6309 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6314 /* Some tracers are only allowed for the top level buffer */
6315 if (!trace_ok_for_array(t, tr)) {
6320 /* If trace pipe files are being read, we can't change the tracer */
6321 if (tr->trace_ref) {
6326 trace_branch_disable();
6328 tr->current_trace->enabled--;
6330 if (tr->current_trace->reset)
6331 tr->current_trace->reset(tr);
6333 /* Current trace needs to be nop_trace before synchronize_rcu */
6334 tr->current_trace = &nop_trace;
6336 #ifdef CONFIG_TRACER_MAX_TRACE
6337 had_max_tr = tr->allocated_snapshot;
6339 if (had_max_tr && !t->use_max_tr) {
6341 * We need to make sure that the update_max_tr sees that
6342 * current_trace changed to nop_trace to keep it from
6343 * swapping the buffers after we resize it.
6344 * The update_max_tr is called with interrupts disabled,
6345 * so a synchronize_rcu() is sufficient.
6352 #ifdef CONFIG_TRACER_MAX_TRACE
6353 if (t->use_max_tr && !had_max_tr) {
6354 ret = tracing_alloc_snapshot_instance(tr);
6361 ret = tracer_init(t, tr);
6366 tr->current_trace = t;
6367 tr->current_trace->enabled++;
6368 trace_branch_enable(tr);
6370 mutex_unlock(&trace_types_lock);
6376 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6377 size_t cnt, loff_t *ppos)
6379 struct trace_array *tr = filp->private_data;
6380 char buf[MAX_TRACER_SIZE+1];
6387 if (cnt > MAX_TRACER_SIZE)
6388 cnt = MAX_TRACER_SIZE;
6390 if (copy_from_user(buf, ubuf, cnt))
6395 /* strip ending whitespace. */
6396 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6399 err = tracing_set_tracer(tr, buf);
6409 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6410 size_t cnt, loff_t *ppos)
6415 r = snprintf(buf, sizeof(buf), "%ld\n",
6416 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6417 if (r > sizeof(buf))
6419 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6423 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6424 size_t cnt, loff_t *ppos)
6429 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6439 tracing_thresh_read(struct file *filp, char __user *ubuf,
6440 size_t cnt, loff_t *ppos)
6442 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6446 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6447 size_t cnt, loff_t *ppos)
6449 struct trace_array *tr = filp->private_data;
6452 mutex_lock(&trace_types_lock);
6453 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6457 if (tr->current_trace->update_thresh) {
6458 ret = tr->current_trace->update_thresh(tr);
6465 mutex_unlock(&trace_types_lock);
6470 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6473 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6474 size_t cnt, loff_t *ppos)
6476 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6480 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6481 size_t cnt, loff_t *ppos)
6483 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6488 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6490 struct trace_array *tr = inode->i_private;
6491 struct trace_iterator *iter;
6494 ret = tracing_check_open_get_tr(tr);
6498 mutex_lock(&trace_types_lock);
6500 /* create a buffer to store the information to pass to userspace */
6501 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6504 __trace_array_put(tr);
6508 trace_seq_init(&iter->seq);
6509 iter->trace = tr->current_trace;
6511 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6516 /* trace pipe does not show start of buffer */
6517 cpumask_setall(iter->started);
6519 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6520 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6522 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6523 if (trace_clocks[tr->clock_id].in_ns)
6524 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6527 iter->array_buffer = &tr->array_buffer;
6528 iter->cpu_file = tracing_get_cpu(inode);
6529 mutex_init(&iter->mutex);
6530 filp->private_data = iter;
6532 if (iter->trace->pipe_open)
6533 iter->trace->pipe_open(iter);
6535 nonseekable_open(inode, filp);
6539 mutex_unlock(&trace_types_lock);
6544 __trace_array_put(tr);
6545 mutex_unlock(&trace_types_lock);
6549 static int tracing_release_pipe(struct inode *inode, struct file *file)
6551 struct trace_iterator *iter = file->private_data;
6552 struct trace_array *tr = inode->i_private;
6554 mutex_lock(&trace_types_lock);
6558 if (iter->trace->pipe_close)
6559 iter->trace->pipe_close(iter);
6561 mutex_unlock(&trace_types_lock);
6563 free_cpumask_var(iter->started);
6564 mutex_destroy(&iter->mutex);
6567 trace_array_put(tr);
6573 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6575 struct trace_array *tr = iter->tr;
6577 /* Iterators are static, they should be filled or empty */
6578 if (trace_buffer_iter(iter, iter->cpu_file))
6579 return EPOLLIN | EPOLLRDNORM;
6581 if (tr->trace_flags & TRACE_ITER_BLOCK)
6583 * Always select as readable when in blocking mode
6585 return EPOLLIN | EPOLLRDNORM;
6587 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6592 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6594 struct trace_iterator *iter = filp->private_data;
6596 return trace_poll(iter, filp, poll_table);
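/*
 * Reader-side note (illustrative): trace_pipe is a consuming, blocking
 * read. "cat /sys/kernel/tracing/trace_pipe" removes entries as it prints
 * them and, unless the file was opened O_NONBLOCK, waits in
 * tracing_wait_pipe() below for new entries instead of returning EOF.
 */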
6599 /* Must be called with iter->mutex held. */
6600 static int tracing_wait_pipe(struct file *filp)
6602 struct trace_iterator *iter = filp->private_data;
6605 while (trace_empty(iter)) {
6607 if ((filp->f_flags & O_NONBLOCK)) {
6612 * We block until we read something and tracing is disabled.
6613 * We still block if tracing is disabled, but we have never
6614 * read anything. This allows a user to cat this file, and
6615 * then enable tracing. But after we have read something,
6616 * we give an EOF when tracing is again disabled.
6618 * iter->pos will be 0 if we haven't read anything.
6620 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6623 mutex_unlock(&iter->mutex);
6625 ret = wait_on_pipe(iter, 0);
6627 mutex_lock(&iter->mutex);
6640 tracing_read_pipe(struct file *filp, char __user *ubuf,
6641 size_t cnt, loff_t *ppos)
6643 struct trace_iterator *iter = filp->private_data;
6647 * Avoid more than one consumer on a single file descriptor.
6648 * This is just a matter of trace coherency; the ring buffer itself
6651 mutex_lock(&iter->mutex);
6653 /* return any leftover data */
6654 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6658 trace_seq_init(&iter->seq);
6660 if (iter->trace->read) {
6661 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6667 sret = tracing_wait_pipe(filp);
6671 /* stop when tracing is finished */
6672 if (trace_empty(iter)) {
6677 if (cnt >= PAGE_SIZE)
6678 cnt = PAGE_SIZE - 1;
6680 /* reset all but tr, trace, and overruns */
6681 memset(&iter->seq, 0,
6682 sizeof(struct trace_iterator) -
6683 offsetof(struct trace_iterator, seq));
6684 cpumask_clear(iter->started);
6685 trace_seq_init(&iter->seq);
6688 trace_event_read_lock();
6689 trace_access_lock(iter->cpu_file);
6690 while (trace_find_next_entry_inc(iter) != NULL) {
6691 enum print_line_t ret;
6692 int save_len = iter->seq.seq.len;
6694 ret = print_trace_line(iter);
6695 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6696 /* don't print partial lines */
6697 iter->seq.seq.len = save_len;
6700 if (ret != TRACE_TYPE_NO_CONSUME)
6701 trace_consume(iter);
6703 if (trace_seq_used(&iter->seq) >= cnt)
6707 * Setting the full flag means we reached the trace_seq buffer
6708 * size and we should leave by partial output condition above.
6709 * One of the trace_seq_* functions is not used properly.
6711 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6714 trace_access_unlock(iter->cpu_file);
6715 trace_event_read_unlock();
6717 /* Now copy what we have to the user */
6718 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6719 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6720 trace_seq_init(&iter->seq);
6723 * If there was nothing to send to user, in spite of consuming trace
6724 * entries, go back to wait for more entries.
6730 mutex_unlock(&iter->mutex);
6735 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6738 __free_page(spd->pages[idx]);
6742 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6748 /* Seq buffer is page-sized, exactly what we need. */
6750 save_len = iter->seq.seq.len;
6751 ret = print_trace_line(iter);
6753 if (trace_seq_has_overflowed(&iter->seq)) {
6754 iter->seq.seq.len = save_len;
6759 * This should not be hit, because it should only
6760 * be set if the iter->seq overflowed. But check it
6761 * anyway to be safe.
6763 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6764 iter->seq.seq.len = save_len;
6768 count = trace_seq_used(&iter->seq) - save_len;
6771 iter->seq.seq.len = save_len;
6775 if (ret != TRACE_TYPE_NO_CONSUME)
6776 trace_consume(iter);
6778 if (!trace_find_next_entry_inc(iter)) {
6788 static ssize_t tracing_splice_read_pipe(struct file *filp,
6790 struct pipe_inode_info *pipe,
6794 struct page *pages_def[PIPE_DEF_BUFFERS];
6795 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6796 struct trace_iterator *iter = filp->private_data;
6797 struct splice_pipe_desc spd = {
6799 .partial = partial_def,
6800 .nr_pages = 0, /* This gets updated below. */
6801 .nr_pages_max = PIPE_DEF_BUFFERS,
6802 .ops = &default_pipe_buf_ops,
6803 .spd_release = tracing_spd_release_pipe,
6809 if (splice_grow_spd(pipe, &spd))
6812 mutex_lock(&iter->mutex);
6814 if (iter->trace->splice_read) {
6815 ret = iter->trace->splice_read(iter, filp,
6816 ppos, pipe, len, flags);
6821 ret = tracing_wait_pipe(filp);
6825 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6830 trace_event_read_lock();
6831 trace_access_lock(iter->cpu_file);
6833 /* Fill as many pages as possible. */
6834 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6835 spd.pages[i] = alloc_page(GFP_KERNEL);
6839 rem = tracing_fill_pipe_page(rem, iter);
6841 /* Copy the data into the page, so we can start over. */
6842 ret = trace_seq_to_buffer(&iter->seq,
6843 page_address(spd.pages[i]),
6844 trace_seq_used(&iter->seq));
6846 __free_page(spd.pages[i]);
6849 spd.partial[i].offset = 0;
6850 spd.partial[i].len = trace_seq_used(&iter->seq);
6852 trace_seq_init(&iter->seq);
6855 trace_access_unlock(iter->cpu_file);
6856 trace_event_read_unlock();
6857 mutex_unlock(&iter->mutex);
6862 ret = splice_to_pipe(pipe, &spd);
6866 splice_shrink_spd(&spd);
6870 mutex_unlock(&iter->mutex);
6875 tracing_entries_read(struct file *filp, char __user *ubuf,
6876 size_t cnt, loff_t *ppos)
6878 struct inode *inode = file_inode(filp);
6879 struct trace_array *tr = inode->i_private;
6880 int cpu = tracing_get_cpu(inode);
6885 mutex_lock(&trace_types_lock);
6887 if (cpu == RING_BUFFER_ALL_CPUS) {
6888 int cpu, buf_size_same;
6893 /* check if all cpu sizes are the same */
6894 for_each_tracing_cpu(cpu) {
6895 /* fill in the size from first enabled cpu */
6897 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6898 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6904 if (buf_size_same) {
6905 if (!ring_buffer_expanded)
6906 r = sprintf(buf, "%lu (expanded: %lu)\n",
6908 trace_buf_size >> 10);
6910 r = sprintf(buf, "%lu\n", size >> 10);
6912 r = sprintf(buf, "X\n");
6914 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6916 mutex_unlock(&trace_types_lock);
6918 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6923 tracing_entries_write(struct file *filp, const char __user *ubuf,
6924 size_t cnt, loff_t *ppos)
6926 struct inode *inode = file_inode(filp);
6927 struct trace_array *tr = inode->i_private;
6931 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6935 /* must have at least 1 entry */
6939 /* value is in KB */
6941 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
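/*
 * Illustrative: "echo 1024 > buffer_size_kb" resizes every per-CPU buffer
 * of this instance to 1 MiB (the value is shifted from KB to bytes above),
 * while writing to per_cpu/cpuN/buffer_size_kb resizes only that CPU's
 * buffer via tracing_get_cpu().
 */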
6951 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6952 size_t cnt, loff_t *ppos)
6954 struct trace_array *tr = filp->private_data;
6957 unsigned long size = 0, expanded_size = 0;
6959 mutex_lock(&trace_types_lock);
6960 for_each_tracing_cpu(cpu) {
6961 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6962 if (!ring_buffer_expanded)
6963 expanded_size += trace_buf_size >> 10;
6965 if (ring_buffer_expanded)
6966 r = sprintf(buf, "%lu\n", size);
6968 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6969 mutex_unlock(&trace_types_lock);
6971 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6975 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6976 size_t cnt, loff_t *ppos)
6979 * There is no need to read what the user has written; this function
6980 * is just to make sure that there is no error when "echo" is used
6989 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6991 struct trace_array *tr = inode->i_private;
6993 /* disable tracing ? */
6994 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6995 tracer_tracing_off(tr);
6996 /* resize the ring buffer to 0 */
6997 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6999 trace_array_put(tr);
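/*
 * Usage sketch for the trace_marker write handler below (illustrative):
 *
 *	echo "hello world" > /sys/kernel/tracing/trace_marker
 *
 * injects a TRACE_PRINT event into the ring buffer that typically shows up
 * in the trace output as "tracing_mark_write: hello world".
 */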
7005 tracing_mark_write(struct file *filp, const char __user *ubuf,
7006 size_t cnt, loff_t *fpos)
7008 struct trace_array *tr = filp->private_data;
7009 struct ring_buffer_event *event;
7010 enum event_trigger_type tt = ETT_NONE;
7011 struct trace_buffer *buffer;
7012 struct print_entry *entry;
7017 /* Used in tracing_mark_raw_write() as well */
7018 #define FAULTED_STR "<faulted>"
7019 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7021 if (tracing_disabled)
7024 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7027 if (cnt > TRACE_BUF_SIZE)
7028 cnt = TRACE_BUF_SIZE;
7030 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7032 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7034 /* If less than "<faulted>", then make sure we can still add that */
7035 if (cnt < FAULTED_SIZE)
7036 size += FAULTED_SIZE - cnt;
7038 buffer = tr->array_buffer.buffer;
7039 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7041 if (unlikely(!event))
7042 /* Ring buffer disabled, return as if not open for write */
7045 entry = ring_buffer_event_data(event);
7046 entry->ip = _THIS_IP_;
7048 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7050 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7056 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7057 /* do not add \n before testing triggers, but add \0 */
7058 entry->buf[cnt] = '\0';
7059 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7062 if (entry->buf[cnt - 1] != '\n') {
7063 entry->buf[cnt] = '\n';
7064 entry->buf[cnt + 1] = '\0';
7066 entry->buf[cnt] = '\0';
7068 if (static_branch_unlikely(&trace_marker_exports_enabled))
7069 ftrace_exports(event, TRACE_EXPORT_MARKER);
7070 __buffer_unlock_commit(buffer, event);
7073 event_triggers_post_call(tr->trace_marker_file, tt);
7081 /* Limit it for now to 3K (including tag) */
7082 #define RAW_DATA_MAX_SIZE (1024*3)
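/*
 * Format sketch for trace_marker_raw (my reading of the handler below):
 * the write must begin with an unsigned int tag id, followed by up to
 * RAW_DATA_MAX_SIZE - sizeof(unsigned int) bytes of opaque data, e.g. from
 * userspace (hypothetical struct and values):
 *
 *	struct { unsigned int id; unsigned long long value; } rec = { 1, 42 };
 *	write(fd, &rec, sizeof(rec));
 */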
7085 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7086 size_t cnt, loff_t *fpos)
7088 struct trace_array *tr = filp->private_data;
7089 struct ring_buffer_event *event;
7090 struct trace_buffer *buffer;
7091 struct raw_data_entry *entry;
7096 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7098 if (tracing_disabled)
7101 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7104 /* The marker must at least have a tag id */
7105 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7108 if (cnt > TRACE_BUF_SIZE)
7109 cnt = TRACE_BUF_SIZE;
7111 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7113 size = sizeof(*entry) + cnt;
7114 if (cnt < FAULT_SIZE_ID)
7115 size += FAULT_SIZE_ID - cnt;
7117 buffer = tr->array_buffer.buffer;
7118 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7121 /* Ring buffer disabled, return as if not open for write */
7124 entry = ring_buffer_event_data(event);
7126 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7129 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7134 __buffer_unlock_commit(buffer, event);
7142 static int tracing_clock_show(struct seq_file *m, void *v)
7144 struct trace_array *tr = m->private;
7147 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7149 "%s%s%s%s", i ? " " : "",
7150 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7151 i == tr->clock_id ? "]" : "");
7157 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7161 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7162 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7165 if (i == ARRAY_SIZE(trace_clocks))
7168 mutex_lock(&trace_types_lock);
7172 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7175 * New clock may not be consistent with the previous clock.
7176 * Reset the buffer so that it doesn't have incomparable timestamps.
7178 tracing_reset_online_cpus(&tr->array_buffer);
7180 #ifdef CONFIG_TRACER_MAX_TRACE
7181 if (tr->max_buffer.buffer)
7182 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7183 tracing_reset_online_cpus(&tr->max_buffer);
7186 mutex_unlock(&trace_types_lock);
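/*
 * Illustrative usage for the clock switch above (see also readme_msg):
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * Both the main and (when present) max buffers are reset on a switch so
 * that mixed, incomparable timestamps never appear in one trace.
 */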
7191 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7192 size_t cnt, loff_t *fpos)
7194 struct seq_file *m = filp->private_data;
7195 struct trace_array *tr = m->private;
7197 const char *clockstr;
7200 if (cnt >= sizeof(buf))
7203 if (copy_from_user(buf, ubuf, cnt))
7208 clockstr = strstrip(buf);
7210 ret = tracing_set_clock(tr, clockstr);
7219 static int tracing_clock_open(struct inode *inode, struct file *file)
7221 struct trace_array *tr = inode->i_private;
7224 ret = tracing_check_open_get_tr(tr);
7228 ret = single_open(file, tracing_clock_show, inode->i_private);
7230 trace_array_put(tr);
7235 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7237 struct trace_array *tr = m->private;
7239 mutex_lock(&trace_types_lock);
7241 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7242 seq_puts(m, "delta [absolute]\n");
7244 seq_puts(m, "[delta] absolute\n");
7246 mutex_unlock(&trace_types_lock);
7251 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7253 struct trace_array *tr = inode->i_private;
7256 ret = tracing_check_open_get_tr(tr);
7260 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7262 trace_array_put(tr);
7267 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7269 if (rbe == this_cpu_read(trace_buffered_event))
7270 return ring_buffer_time_stamp(buffer);
7272 return ring_buffer_event_time_stamp(buffer, rbe);
7276 * Set or disable using the per CPU trace_buffered_event when possible.
7278 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7282 mutex_lock(&trace_types_lock);
7284 if (set && tr->no_filter_buffering_ref++)
7288 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7293 --tr->no_filter_buffering_ref;
7296 mutex_unlock(&trace_types_lock);
7301 struct ftrace_buffer_info {
7302 struct trace_iterator iter;
7304 unsigned int spare_cpu;
7308 #ifdef CONFIG_TRACER_SNAPSHOT
7309 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7311 struct trace_array *tr = inode->i_private;
7312 struct trace_iterator *iter;
7316 ret = tracing_check_open_get_tr(tr);
7320 if (file->f_mode & FMODE_READ) {
7321 iter = __tracing_open(inode, file, true);
7323 ret = PTR_ERR(iter);
7325 /* Writes still need the seq_file to hold the private data */
7327 m = kzalloc(sizeof(*m), GFP_KERNEL);
7330 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7338 iter->array_buffer = &tr->max_buffer;
7339 iter->cpu_file = tracing_get_cpu(inode);
7341 file->private_data = m;
7345 trace_array_put(tr);
7351 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7354 struct seq_file *m = filp->private_data;
7355 struct trace_iterator *iter = m->private;
7356 struct trace_array *tr = iter->tr;
7360 ret = tracing_update_buffers();
7364 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7368 mutex_lock(&trace_types_lock);
7370 if (tr->current_trace->use_max_tr) {
7375 arch_spin_lock(&tr->max_lock);
7376 if (tr->cond_snapshot)
7378 arch_spin_unlock(&tr->max_lock);
7384 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7388 if (tr->allocated_snapshot)
7392 /* Only allow per-cpu swap if the ring buffer supports it */
7393 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7394 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7399 if (tr->allocated_snapshot)
7400 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7401 &tr->array_buffer, iter->cpu_file);
7403 ret = tracing_alloc_snapshot_instance(tr);
7406 local_irq_disable();
7407 /* Now, we're going to swap */
7408 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7409 update_max_tr(tr, current, smp_processor_id(), NULL);
7411 update_max_tr_single(tr, current, iter->cpu_file);
7415 if (tr->allocated_snapshot) {
7416 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7417 tracing_reset_online_cpus(&tr->max_buffer);
7419 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7429 mutex_unlock(&trace_types_lock);
7433 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7435 struct seq_file *m = file->private_data;
7438 ret = tracing_release(inode, file);
7440 if (file->f_mode & FMODE_READ)
7443 /* If write only, the seq_file is just a stub */
7451 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7452 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7453 size_t count, loff_t *ppos);
7454 static int tracing_buffers_release(struct inode *inode, struct file *file);
7455 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7456 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7458 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7460 struct ftrace_buffer_info *info;
7463 /* The following checks for tracefs lockdown */
7464 ret = tracing_buffers_open(inode, filp);
7468 info = filp->private_data;
7470 if (info->iter.trace->use_max_tr) {
7471 tracing_buffers_release(inode, filp);
7475 info->iter.snapshot = true;
7476 info->iter.array_buffer = &info->iter.tr->max_buffer;
7481 #endif /* CONFIG_TRACER_SNAPSHOT */
7484 static const struct file_operations tracing_thresh_fops = {
7485 .open = tracing_open_generic,
7486 .read = tracing_thresh_read,
7487 .write = tracing_thresh_write,
7488 .llseek = generic_file_llseek,
7491 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7492 static const struct file_operations tracing_max_lat_fops = {
7493 .open = tracing_open_generic,
7494 .read = tracing_max_lat_read,
7495 .write = tracing_max_lat_write,
7496 .llseek = generic_file_llseek,
7500 static const struct file_operations set_tracer_fops = {
7501 .open = tracing_open_generic,
7502 .read = tracing_set_trace_read,
7503 .write = tracing_set_trace_write,
7504 .llseek = generic_file_llseek,
7507 static const struct file_operations tracing_pipe_fops = {
7508 .open = tracing_open_pipe,
7509 .poll = tracing_poll_pipe,
7510 .read = tracing_read_pipe,
7511 .splice_read = tracing_splice_read_pipe,
7512 .release = tracing_release_pipe,
7513 .llseek = no_llseek,
7516 static const struct file_operations tracing_entries_fops = {
7517 .open = tracing_open_generic_tr,
7518 .read = tracing_entries_read,
7519 .write = tracing_entries_write,
7520 .llseek = generic_file_llseek,
7521 .release = tracing_release_generic_tr,
7524 static const struct file_operations tracing_total_entries_fops = {
7525 .open = tracing_open_generic_tr,
7526 .read = tracing_total_entries_read,
7527 .llseek = generic_file_llseek,
7528 .release = tracing_release_generic_tr,
7531 static const struct file_operations tracing_free_buffer_fops = {
7532 .open = tracing_open_generic_tr,
7533 .write = tracing_free_buffer_write,
7534 .release = tracing_free_buffer_release,
7537 static const struct file_operations tracing_mark_fops = {
7538 .open = tracing_open_generic_tr,
7539 .write = tracing_mark_write,
7540 .llseek = generic_file_llseek,
7541 .release = tracing_release_generic_tr,
7544 static const struct file_operations tracing_mark_raw_fops = {
7545 .open = tracing_open_generic_tr,
7546 .write = tracing_mark_raw_write,
7547 .llseek = generic_file_llseek,
7548 .release = tracing_release_generic_tr,
7551 static const struct file_operations trace_clock_fops = {
7552 .open = tracing_clock_open,
7554 .llseek = seq_lseek,
7555 .release = tracing_single_release_tr,
7556 .write = tracing_clock_write,
7559 static const struct file_operations trace_time_stamp_mode_fops = {
7560 .open = tracing_time_stamp_mode_open,
7562 .llseek = seq_lseek,
7563 .release = tracing_single_release_tr,
7566 #ifdef CONFIG_TRACER_SNAPSHOT
7567 static const struct file_operations snapshot_fops = {
7568 .open = tracing_snapshot_open,
7570 .write = tracing_snapshot_write,
7571 .llseek = tracing_lseek,
7572 .release = tracing_snapshot_release,
7575 static const struct file_operations snapshot_raw_fops = {
7576 .open = snapshot_raw_open,
7577 .read = tracing_buffers_read,
7578 .release = tracing_buffers_release,
7579 .splice_read = tracing_buffers_splice_read,
7580 .llseek = no_llseek,
7583 #endif /* CONFIG_TRACER_SNAPSHOT */
7586 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7587 * @filp: The active open file structure
* @ubuf: The userspace provided buffer holding the value to write
* @cnt: The number of bytes to read from @ubuf
7590 * @ppos: The current "file" position
7592 * This function implements the write interface for a struct trace_min_max_param.
7593 * The filp->private_data must point to a trace_min_max_param structure that
7594 * defines where to write the value, the min and the max acceptable values,
7595 * and a lock to protect the write.
7598 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7600 struct trace_min_max_param *param = filp->private_data;
7607 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7612 mutex_lock(param->lock);
7614 if (param->min && val < *param->min)
7617 if (param->max && val > *param->max)
7624 mutex_unlock(param->lock);
7633 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7634 * @filp: The active open file structure
7635 * @ubuf: The userspace provided buffer to read value into
7636 * @cnt: The maximum number of bytes to read
7637 * @ppos: The current "file" position
7639 * This function implements the read interface for a struct trace_min_max_param.
* The filp->private_data must point to a trace_min_max_param struct with valid data.
7644 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7646 struct trace_min_max_param *param = filp->private_data;
7647 char buf[U64_STR_SIZE];
7656 if (cnt > sizeof(buf))
7659 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7661 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7664 const struct file_operations trace_min_max_fops = {
7665 .open = tracing_open_generic,
7666 .read = trace_min_max_read,
7667 .write = trace_min_max_write,
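/*
 * Illustrative sketch (not part of this file; all "example_*" names are
 * hypothetical): a tracer that wants a bounded u64 control file only has
 * to fill in a trace_min_max_param and create the file with
 * trace_min_max_fops:
 *
 *	static u64 example_val, example_min = 1, example_max = 100;
 *	static DEFINE_MUTEX(example_lock);
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= &example_lock,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	trace_create_file("example_threshold", 0644, parent,
 *			  &example_param, &trace_min_max_fops);
 *
 * Reads then return the current value, and writes outside
 * [example_min, example_max] are rejected.
 */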
7670 #define TRACING_LOG_ERRS_MAX 8
7671 #define TRACING_LOG_LOC_MAX 128
7673 #define CMD_PREFIX " Command: "
7676 const char **errs; /* ptr to loc-specific array of err strings */
7677 u8 type; /* index into errs -> specific err string */
7678 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7682 struct tracing_log_err {
7683 struct list_head list;
7684 struct err_info info;
7685 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7686 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7689 static DEFINE_MUTEX(tracing_err_log_lock);
7691 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7693 struct tracing_log_err *err;
7695 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7696 err = kzalloc(sizeof(*err), GFP_KERNEL);
7698 err = ERR_PTR(-ENOMEM);
7699 tr->n_err_log_entries++;
7704 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7705 list_del(&err->list);
7711 * err_pos - find the position of a string within a command for error careting
7712 * @cmd: The tracing command that caused the error
7713 * @str: The string to position the caret at within @cmd
7715 * Finds the position of the first occurrence of @str within @cmd. The
7716 * return value can be passed to tracing_log_err() for caret placement
7719 * Returns the index within @cmd of the first occurrence of @str or 0
7720 * if @str was not found.
7722 unsigned int err_pos(char *cmd, const char *str)
7726 if (WARN_ON(!strlen(cmd)))
7729 found = strstr(cmd, str);
7737 * tracing_log_err - write an error to the tracing error log
7738 * @tr: The associated trace array for the error (NULL for top level array)
7739 * @loc: A string describing where the error occurred
7740 * @cmd: The tracing command that caused the error
7741 * @errs: The array of loc-specific static error strings
7742 * @type: The index into errs[], which produces the specific static err string
7743 * @pos: The position the caret should be placed in the cmd
7745 * Writes an error into tracing/error_log of the form:
7747 * <loc>: error: <text>
7751 * tracing/error_log is a small log file containing the last
7752 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7753 * unless there has been a tracing error, and the error log can be
7754 * cleared and have its memory freed by writing the empty string in
7755 * truncation mode to it i.e. echo > tracing/error_log.
7757 * NOTE: the @errs array along with the @type param are used to
7758 * produce a static error string - this string is not copied and saved
7759 * when the error is logged - only a pointer to it is saved. See
7760 * existing callers for examples of how static strings are typically
7761 * defined for use with tracing_log_err().
7763 void tracing_log_err(struct trace_array *tr,
7764 const char *loc, const char *cmd,
7765 const char **errs, u8 type, u8 pos)
7767 struct tracing_log_err *err;
7772 mutex_lock(&tracing_err_log_lock);
7773 err = get_tracing_log_err(tr);
7774 if (PTR_ERR(err) == -ENOMEM) {
7775 mutex_unlock(&tracing_err_log_lock);
7779 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7782 err->info.errs = errs;
7783 err->info.type = type;
7784 err->info.pos = pos;
7785 err->info.ts = local_clock();
7787 list_add_tail(&err->list, &tr->err_log);
7788 mutex_unlock(&tracing_err_log_lock);
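/*
 * Illustrative sketch (not part of this file; the names below are
 * hypothetical): a typical caller keeps a static array of error strings
 * and logs one of them by index, using err_pos() to put the caret under
 * the offending token:
 *
 *	static const char *example_errs[] = { "Duplicate name", "Bad field" };
 *
 *	tracing_log_err(tr, "example_cmd", cmd, example_errs,
 *			1, err_pos(cmd, field_name));
 *
 * The resulting tracing/error_log entry then looks roughly like:
 *
 *	[  123.456789] example_cmd: error: Bad field
 *	  Command: <the full command string>
 *	               ^
 */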
7791 static void clear_tracing_err_log(struct trace_array *tr)
7793 struct tracing_log_err *err, *next;
7795 mutex_lock(&tracing_err_log_lock);
7796 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7797 list_del(&err->list);
7801 tr->n_err_log_entries = 0;
7802 mutex_unlock(&tracing_err_log_lock);
7805 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7807 struct trace_array *tr = m->private;
7809 mutex_lock(&tracing_err_log_lock);
7811 return seq_list_start(&tr->err_log, *pos);
7814 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7816 struct trace_array *tr = m->private;
7818 return seq_list_next(v, &tr->err_log, pos);
7821 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7823 mutex_unlock(&tracing_err_log_lock);
7826 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7830 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7832 for (i = 0; i < pos; i++)
7837 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7839 struct tracing_log_err *err = v;
7842 const char *err_text = err->info.errs[err->info.type];
7843 u64 sec = err->info.ts;
7846 nsec = do_div(sec, NSEC_PER_SEC);
7847 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7848 err->loc, err_text);
7849 seq_printf(m, "%s", err->cmd);
7850 tracing_err_log_show_pos(m, err->info.pos);
7856 static const struct seq_operations tracing_err_log_seq_ops = {
7857 .start = tracing_err_log_seq_start,
7858 .next = tracing_err_log_seq_next,
7859 .stop = tracing_err_log_seq_stop,
7860 .show = tracing_err_log_seq_show
7863 static int tracing_err_log_open(struct inode *inode, struct file *file)
7865 struct trace_array *tr = inode->i_private;
7868 ret = tracing_check_open_get_tr(tr);
7872 /* If this file was opened for write, then erase contents */
7873 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7874 clear_tracing_err_log(tr);
7876 if (file->f_mode & FMODE_READ) {
7877 ret = seq_open(file, &tracing_err_log_seq_ops);
7879 struct seq_file *m = file->private_data;
7882 trace_array_put(tr);
7888 static ssize_t tracing_err_log_write(struct file *file,
7889 const char __user *buffer,
7890 size_t count, loff_t *ppos)
7895 static int tracing_err_log_release(struct inode *inode, struct file *file)
7897 struct trace_array *tr = inode->i_private;
7899 trace_array_put(tr);
7901 if (file->f_mode & FMODE_READ)
7902 seq_release(inode, file);
7907 static const struct file_operations tracing_err_log_fops = {
7908 .open = tracing_err_log_open,
7909 .write = tracing_err_log_write,
7911 .llseek = seq_lseek,
7912 .release = tracing_err_log_release,
7915 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7917 struct trace_array *tr = inode->i_private;
7918 struct ftrace_buffer_info *info;
7921 ret = tracing_check_open_get_tr(tr);
7925 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7927 trace_array_put(tr);
7931 mutex_lock(&trace_types_lock);
7934 info->iter.cpu_file = tracing_get_cpu(inode);
7935 info->iter.trace = tr->current_trace;
7936 info->iter.array_buffer = &tr->array_buffer;
7938 /* Force reading ring buffer for first read */
7939 info->read = (unsigned int)-1;
7941 filp->private_data = info;
7945 mutex_unlock(&trace_types_lock);
7947 ret = nonseekable_open(inode, filp);
7949 trace_array_put(tr);
7955 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7957 struct ftrace_buffer_info *info = filp->private_data;
7958 struct trace_iterator *iter = &info->iter;
7960 return trace_poll(iter, filp, poll_table);
7964 tracing_buffers_read(struct file *filp, char __user *ubuf,
7965 size_t count, loff_t *ppos)
7967 struct ftrace_buffer_info *info = filp->private_data;
7968 struct trace_iterator *iter = &info->iter;
7975 #ifdef CONFIG_TRACER_MAX_TRACE
7976 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7981 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7983 if (IS_ERR(info->spare)) {
7984 ret = PTR_ERR(info->spare);
7987 info->spare_cpu = iter->cpu_file;
7993 /* Do we have previous read data to read? */
7994 if (info->read < PAGE_SIZE)
7998 trace_access_lock(iter->cpu_file);
7999 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8003 trace_access_unlock(iter->cpu_file);
8006 if (trace_empty(iter)) {
8007 if ((filp->f_flags & O_NONBLOCK))
8010 ret = wait_on_pipe(iter, 0);
8021 size = PAGE_SIZE - info->read;
8025 ret = copy_to_user(ubuf, info->spare + info->read, size);
8037 static int tracing_buffers_release(struct inode *inode, struct file *file)
8039 struct ftrace_buffer_info *info = file->private_data;
8040 struct trace_iterator *iter = &info->iter;
8042 mutex_lock(&trace_types_lock);
8044 iter->tr->trace_ref--;
8046 __trace_array_put(iter->tr);
8049 ring_buffer_free_read_page(iter->array_buffer->buffer,
8050 info->spare_cpu, info->spare);
8053 mutex_unlock(&trace_types_lock);
8059 struct trace_buffer *buffer;
8062 refcount_t refcount;
8065 static void buffer_ref_release(struct buffer_ref *ref)
8067 if (!refcount_dec_and_test(&ref->refcount))
8069 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8073 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8074 struct pipe_buffer *buf)
8076 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8078 buffer_ref_release(ref);
8082 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8083 struct pipe_buffer *buf)
8085 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8087 if (refcount_read(&ref->refcount) > INT_MAX/2)
8090 refcount_inc(&ref->refcount);
8094 /* Pipe buffer operations for a buffer. */
8095 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8096 .release = buffer_pipe_buf_release,
8097 .get = buffer_pipe_buf_get,
8101 * Callback from splice_to_pipe(), if we need to release some pages
* at the end of the spd in case we errored out while filling the pipe.
8104 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8106 struct buffer_ref *ref =
8107 (struct buffer_ref *)spd->partial[i].private;
8109 buffer_ref_release(ref);
8110 spd->partial[i].private = 0;
8114 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8115 struct pipe_inode_info *pipe, size_t len,
8118 struct ftrace_buffer_info *info = file->private_data;
8119 struct trace_iterator *iter = &info->iter;
8120 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8121 struct page *pages_def[PIPE_DEF_BUFFERS];
8122 struct splice_pipe_desc spd = {
8124 .partial = partial_def,
8125 .nr_pages_max = PIPE_DEF_BUFFERS,
8126 .ops = &buffer_pipe_buf_ops,
8127 .spd_release = buffer_spd_release,
8129 struct buffer_ref *ref;
8133 #ifdef CONFIG_TRACER_MAX_TRACE
8134 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8138 if (*ppos & (PAGE_SIZE - 1))
8141 if (len & (PAGE_SIZE - 1)) {
8142 if (len < PAGE_SIZE)
8147 if (splice_grow_spd(pipe, &spd))
8151 trace_access_lock(iter->cpu_file);
8152 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8154 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8158 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8164 refcount_set(&ref->refcount, 1);
8165 ref->buffer = iter->array_buffer->buffer;
8166 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8167 if (IS_ERR(ref->page)) {
8168 ret = PTR_ERR(ref->page);
8173 ref->cpu = iter->cpu_file;
8175 r = ring_buffer_read_page(ref->buffer, &ref->page,
8176 len, iter->cpu_file, 1);
8178 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8184 page = virt_to_page(ref->page);
8186 spd.pages[i] = page;
8187 spd.partial[i].len = PAGE_SIZE;
8188 spd.partial[i].offset = 0;
8189 spd.partial[i].private = (unsigned long)ref;
8193 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8196 trace_access_unlock(iter->cpu_file);
8199 /* did we read anything? */
8200 if (!spd.nr_pages) {
8205 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8208 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8215 ret = splice_to_pipe(pipe, &spd);
8217 splice_shrink_spd(&spd);
8222 static const struct file_operations tracing_buffers_fops = {
8223 .open = tracing_buffers_open,
8224 .read = tracing_buffers_read,
8225 .poll = tracing_buffers_poll,
8226 .release = tracing_buffers_release,
8227 .splice_read = tracing_buffers_splice_read,
8228 .llseek = no_llseek,
8232 tracing_stats_read(struct file *filp, char __user *ubuf,
8233 size_t count, loff_t *ppos)
8235 struct inode *inode = file_inode(filp);
8236 struct trace_array *tr = inode->i_private;
8237 struct array_buffer *trace_buf = &tr->array_buffer;
8238 int cpu = tracing_get_cpu(inode);
8239 struct trace_seq *s;
8241 unsigned long long t;
8242 unsigned long usec_rem;
8244 s = kmalloc(sizeof(*s), GFP_KERNEL);
8250 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8251 trace_seq_printf(s, "entries: %ld\n", cnt);
8253 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8254 trace_seq_printf(s, "overrun: %ld\n", cnt);
8256 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8257 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8259 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8260 trace_seq_printf(s, "bytes: %ld\n", cnt);
8262 if (trace_clocks[tr->clock_id].in_ns) {
8263 /* local or global for trace_clock */
8264 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8265 usec_rem = do_div(t, USEC_PER_SEC);
8266 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8269 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8270 usec_rem = do_div(t, USEC_PER_SEC);
8271 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8273 /* counter or tsc mode for trace_clock */
8274 trace_seq_printf(s, "oldest event ts: %llu\n",
8275 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8277 trace_seq_printf(s, "now ts: %llu\n",
8278 ring_buffer_time_stamp(trace_buf->buffer));
8281 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8282 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8284 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8285 trace_seq_printf(s, "read events: %ld\n", cnt);
8287 count = simple_read_from_buffer(ubuf, count, ppos,
8288 s->buffer, trace_seq_used(s));
8295 static const struct file_operations tracing_stats_fops = {
8296 .open = tracing_open_generic_tr,
8297 .read = tracing_stats_read,
8298 .llseek = generic_file_llseek,
8299 .release = tracing_release_generic_tr,
8302 #ifdef CONFIG_DYNAMIC_FTRACE
8305 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8306 size_t cnt, loff_t *ppos)
8312 /* 256 should be plenty to hold the amount needed */
8313 buf = kmalloc(256, GFP_KERNEL);
8317 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8318 ftrace_update_tot_cnt,
8319 ftrace_number_of_pages,
8320 ftrace_number_of_groups);
8322 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8327 static const struct file_operations tracing_dyn_info_fops = {
8328 .open = tracing_open_generic,
8329 .read = tracing_read_dyn_info,
8330 .llseek = generic_file_llseek,
8332 #endif /* CONFIG_DYNAMIC_FTRACE */
8334 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8336 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8337 struct trace_array *tr, struct ftrace_probe_ops *ops,
8340 tracing_snapshot_instance(tr);
8344 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8345 struct trace_array *tr, struct ftrace_probe_ops *ops,
8348 struct ftrace_func_mapper *mapper = data;
8352 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8362 tracing_snapshot_instance(tr);
8366 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8367 struct ftrace_probe_ops *ops, void *data)
8369 struct ftrace_func_mapper *mapper = data;
8372 seq_printf(m, "%ps:", (void *)ip);
8374 seq_puts(m, "snapshot");
8377 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8380 seq_printf(m, ":count=%ld\n", *count);
8382 seq_puts(m, ":unlimited\n");
8388 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8389 unsigned long ip, void *init_data, void **data)
8391 struct ftrace_func_mapper *mapper = *data;
8394 mapper = allocate_ftrace_func_mapper();
8400 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8404 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8405 unsigned long ip, void *data)
8407 struct ftrace_func_mapper *mapper = data;
8412 free_ftrace_func_mapper(mapper, NULL);
8416 ftrace_func_mapper_remove_ip(mapper, ip);
8419 static struct ftrace_probe_ops snapshot_probe_ops = {
8420 .func = ftrace_snapshot,
8421 .print = ftrace_snapshot_print,
8424 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8425 .func = ftrace_count_snapshot,
8426 .print = ftrace_snapshot_print,
8427 .init = ftrace_snapshot_init,
8428 .free = ftrace_snapshot_free,
8432 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8433 char *glob, char *cmd, char *param, int enable)
8435 struct ftrace_probe_ops *ops;
8436 void *count = (void *)-1;
8443 /* hash funcs only work with set_ftrace_filter */
8447 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8450 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8455 number = strsep(¶m, ":");
8457 if (!strlen(number))
8461 * We use the callback data field (which is a pointer)
8464 ret = kstrtoul(number, 0, (unsigned long *)&count);
8469 ret = tracing_alloc_snapshot_instance(tr);
8473 ret = register_ftrace_function_probe(glob, tr, ops, count);
8476 return ret < 0 ? ret : 0;
8479 static struct ftrace_func_command ftrace_snapshot_cmd = {
8481 .func = ftrace_trace_snapshot_callback,
8484 static __init int register_snapshot_cmd(void)
8486 return register_ftrace_command(&ftrace_snapshot_cmd);
8489 static inline __init int register_snapshot_cmd(void) { return 0; }
8490 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
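/*
 * Illustrative usage of the ftrace "snapshot" command registered above
 * (a sketch; Documentation/trace/ftrace.rst has the authoritative
 * description). The command is driven through set_ftrace_filter:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:5' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The optional :count limits how many snapshots are taken, and the
 * leading '!' removes the probe again.
 */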
8492 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8494 if (WARN_ON(!tr->dir))
8495 return ERR_PTR(-ENODEV);
8497 /* Top directory uses NULL as the parent */
8498 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8501 /* All sub buffers have a descriptor */
8505 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8507 struct dentry *d_tracer;
8510 return tr->percpu_dir;
8512 d_tracer = tracing_get_dentry(tr);
8513 if (IS_ERR(d_tracer))
8516 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8518 MEM_FAIL(!tr->percpu_dir,
8519 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8521 return tr->percpu_dir;
8524 static struct dentry *
8525 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8526 void *data, long cpu, const struct file_operations *fops)
8528 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8530 if (ret) /* See tracing_get_cpu() */
8531 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8536 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8538 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8539 struct dentry *d_cpu;
8540 char cpu_dir[30]; /* 30 characters should be more than enough */
8545 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8546 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8548 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8552 /* per cpu trace_pipe */
8553 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8554 tr, cpu, &tracing_pipe_fops);
8557 trace_create_cpu_file("trace", 0644, d_cpu,
8558 tr, cpu, &tracing_fops);
8560 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8561 tr, cpu, &tracing_buffers_fops);
8563 trace_create_cpu_file("stats", 0444, d_cpu,
8564 tr, cpu, &tracing_stats_fops);
8566 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8567 tr, cpu, &tracing_entries_fops);
8569 #ifdef CONFIG_TRACER_SNAPSHOT
8570 trace_create_cpu_file("snapshot", 0644, d_cpu,
8571 tr, cpu, &snapshot_fops);
8573 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8574 tr, cpu, &snapshot_raw_fops);
8578 #ifdef CONFIG_FTRACE_SELFTEST
8579 /* Let selftest have access to static functions in this file */
8580 #include "trace_selftest.c"
8584 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8587 struct trace_option_dentry *topt = filp->private_data;
8590 if (topt->flags->val & topt->opt->bit)
8595 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8599 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8602 struct trace_option_dentry *topt = filp->private_data;
8606 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8610 if (val != 0 && val != 1)
8613 if (!!(topt->flags->val & topt->opt->bit) != val) {
8614 mutex_lock(&trace_types_lock);
8615 ret = __set_tracer_option(topt->tr, topt->flags,
8617 mutex_unlock(&trace_types_lock);
8628 static const struct file_operations trace_options_fops = {
8629 .open = tracing_open_generic,
8630 .read = trace_options_read,
8631 .write = trace_options_write,
8632 .llseek = generic_file_llseek,
8636 * In order to pass in both the trace_array descriptor as well as the index
8637 * to the flag that the trace option file represents, the trace_array
8638 * has a character array of trace_flags_index[], which holds the index
8639 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8640 * The address of this character array is passed to the flag option file
8641 * read/write callbacks.
8643 * In order to extract both the index and the trace_array descriptor,
* get_tr_index() uses the following algorithm.
*
*   idx = *ptr;
*
* As the pointer itself contains the address of the index (remember
* index[0] == 0), dereferencing it yields the flag's index.
*
* Then to get the trace_array descriptor, by subtracting that index
* from the ptr, we get to the start of the index array itself:
*
*   ptr - idx == &index[0]
8656 * Then a simple container_of() from that pointer gets us to the
8657 * trace_array descriptor.
8659 static void get_tr_index(void *data, struct trace_array **ptr,
8660 unsigned int *pindex)
8662 *pindex = *(unsigned char *)data;
*ptr = container_of(data - *pindex, struct trace_array,
		    trace_flags_index);
}
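/*
 * Worked example of the scheme above (values are illustrative): if the
 * option file for flag 3 was created with data == &tr->trace_flags_index[3],
 * then *(unsigned char *)data == 3, data - 3 == tr->trace_flags_index, and
 * container_of() on that address recovers tr itself.
 */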
8669 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8672 void *tr_index = filp->private_data;
8673 struct trace_array *tr;
8677 get_tr_index(tr_index, &tr, &index);
8679 if (tr->trace_flags & (1 << index))
8684 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8688 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8691 void *tr_index = filp->private_data;
8692 struct trace_array *tr;
8697 get_tr_index(tr_index, &tr, &index);
8699 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8703 if (val != 0 && val != 1)
8706 mutex_lock(&event_mutex);
8707 mutex_lock(&trace_types_lock);
8708 ret = set_tracer_flag(tr, 1 << index, val);
8709 mutex_unlock(&trace_types_lock);
8710 mutex_unlock(&event_mutex);
8720 static const struct file_operations trace_options_core_fops = {
8721 .open = tracing_open_generic,
8722 .read = trace_options_core_read,
8723 .write = trace_options_core_write,
8724 .llseek = generic_file_llseek,
8727 struct dentry *trace_create_file(const char *name,
8729 struct dentry *parent,
8731 const struct file_operations *fops)
8735 ret = tracefs_create_file(name, mode, parent, data, fops);
8737 pr_warn("Could not create tracefs '%s' entry\n", name);
8743 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8745 struct dentry *d_tracer;
8750 d_tracer = tracing_get_dentry(tr);
8751 if (IS_ERR(d_tracer))
8754 tr->options = tracefs_create_dir("options", d_tracer);
8756 pr_warn("Could not create tracefs directory 'options'\n");
8764 create_trace_option_file(struct trace_array *tr,
8765 struct trace_option_dentry *topt,
8766 struct tracer_flags *flags,
8767 struct tracer_opt *opt)
8769 struct dentry *t_options;
8771 t_options = trace_options_init_dentry(tr);
8775 topt->flags = flags;
8779 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8780 &trace_options_fops);
8785 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8787 struct trace_option_dentry *topts;
8788 struct trace_options *tr_topts;
8789 struct tracer_flags *flags;
8790 struct tracer_opt *opts;
8797 flags = tracer->flags;
8799 if (!flags || !flags->opts)
8803 * If this is an instance, only create flags for tracers
8804 * the instance may have.
8806 if (!trace_ok_for_array(tracer, tr))
8809 for (i = 0; i < tr->nr_topts; i++) {
/* Make sure there are no duplicate flags. */
8811 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8817 for (cnt = 0; opts[cnt].name; cnt++)
8820 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8824 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8831 tr->topts = tr_topts;
8832 tr->topts[tr->nr_topts].tracer = tracer;
8833 tr->topts[tr->nr_topts].topts = topts;
8836 for (cnt = 0; opts[cnt].name; cnt++) {
8837 create_trace_option_file(tr, &topts[cnt], flags,
8839 MEM_FAIL(topts[cnt].entry == NULL,
8840 "Failed to create trace option: %s",
8845 static struct dentry *
8846 create_trace_option_core_file(struct trace_array *tr,
8847 const char *option, long index)
8849 struct dentry *t_options;
8851 t_options = trace_options_init_dentry(tr);
8855 return trace_create_file(option, 0644, t_options,
8856 (void *)&tr->trace_flags_index[index],
8857 &trace_options_core_fops);
8860 static void create_trace_options_dir(struct trace_array *tr)
8862 struct dentry *t_options;
8863 bool top_level = tr == &global_trace;
8866 t_options = trace_options_init_dentry(tr);
8870 for (i = 0; trace_options[i]; i++) {
8872 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8873 create_trace_option_core_file(tr, trace_options[i], i);
8878 rb_simple_read(struct file *filp, char __user *ubuf,
8879 size_t cnt, loff_t *ppos)
8881 struct trace_array *tr = filp->private_data;
8885 r = tracer_tracing_is_on(tr);
8886 r = sprintf(buf, "%d\n", r);
8888 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8892 rb_simple_write(struct file *filp, const char __user *ubuf,
8893 size_t cnt, loff_t *ppos)
8895 struct trace_array *tr = filp->private_data;
8896 struct trace_buffer *buffer = tr->array_buffer.buffer;
8900 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8905 mutex_lock(&trace_types_lock);
8906 if (!!val == tracer_tracing_is_on(tr)) {
8907 val = 0; /* do nothing */
8909 tracer_tracing_on(tr);
8910 if (tr->current_trace->start)
8911 tr->current_trace->start(tr);
8913 tracer_tracing_off(tr);
8914 if (tr->current_trace->stop)
8915 tr->current_trace->stop(tr);
8917 mutex_unlock(&trace_types_lock);
8925 static const struct file_operations rb_simple_fops = {
8926 .open = tracing_open_generic_tr,
8927 .read = rb_simple_read,
8928 .write = rb_simple_write,
8929 .release = tracing_release_generic_tr,
8930 .llseek = default_llseek,
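/*
 * From user space this is the "tracing_on" control file, e.g.
 *
 *	# echo 0 > tracing_on		(stop writing to the ring buffer)
 *	# echo 1 > tracing_on		(resume)
 *
 * Writing the value that is already set is a no-op, as handled above.
 */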
8934 buffer_percent_read(struct file *filp, char __user *ubuf,
8935 size_t cnt, loff_t *ppos)
8937 struct trace_array *tr = filp->private_data;
8941 r = tr->buffer_percent;
8942 r = sprintf(buf, "%d\n", r);
8944 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8948 buffer_percent_write(struct file *filp, const char __user *ubuf,
8949 size_t cnt, loff_t *ppos)
8951 struct trace_array *tr = filp->private_data;
8955 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8965 tr->buffer_percent = val;
8972 static const struct file_operations buffer_percent_fops = {
8973 .open = tracing_open_generic_tr,
8974 .read = buffer_percent_read,
8975 .write = buffer_percent_write,
8976 .release = tracing_release_generic_tr,
8977 .llseek = default_llseek,
8980 static struct dentry *trace_instance_dir;
8983 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8986 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8988 enum ring_buffer_flags rb_flags;
8990 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8994 buf->buffer = ring_buffer_alloc(size, rb_flags);
8998 buf->data = alloc_percpu(struct trace_array_cpu);
9000 ring_buffer_free(buf->buffer);
9005 /* Allocate the first page for all buffers */
9006 set_buffer_entries(&tr->array_buffer,
9007 ring_buffer_size(tr->array_buffer.buffer, 0));
9012 static int allocate_trace_buffers(struct trace_array *tr, int size)
9016 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9020 #ifdef CONFIG_TRACER_MAX_TRACE
9021 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9022 allocate_snapshot ? size : 1);
9023 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9024 ring_buffer_free(tr->array_buffer.buffer);
9025 tr->array_buffer.buffer = NULL;
9026 free_percpu(tr->array_buffer.data);
9027 tr->array_buffer.data = NULL;
9030 tr->allocated_snapshot = allocate_snapshot;
9033 * Only the top level trace array gets its snapshot allocated
9034 * from the kernel command line.
9036 allocate_snapshot = false;
9042 static void free_trace_buffer(struct array_buffer *buf)
9045 ring_buffer_free(buf->buffer);
9047 free_percpu(buf->data);
9052 static void free_trace_buffers(struct trace_array *tr)
9057 free_trace_buffer(&tr->array_buffer);
9059 #ifdef CONFIG_TRACER_MAX_TRACE
9060 free_trace_buffer(&tr->max_buffer);
9064 static void init_trace_flags_index(struct trace_array *tr)
9068 /* Used by the trace options files */
9069 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9070 tr->trace_flags_index[i] = i;
9073 static void __update_tracer_options(struct trace_array *tr)
9077 for (t = trace_types; t; t = t->next)
9078 add_tracer_options(tr, t);
9081 static void update_tracer_options(struct trace_array *tr)
9083 mutex_lock(&trace_types_lock);
9084 __update_tracer_options(tr);
9085 mutex_unlock(&trace_types_lock);
9088 /* Must have trace_types_lock held */
9089 struct trace_array *trace_array_find(const char *instance)
9091 struct trace_array *tr, *found = NULL;
9093 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9094 if (tr->name && strcmp(tr->name, instance) == 0) {
9103 struct trace_array *trace_array_find_get(const char *instance)
9105 struct trace_array *tr;
9107 mutex_lock(&trace_types_lock);
9108 tr = trace_array_find(instance);
9111 mutex_unlock(&trace_types_lock);
9116 static int trace_array_create_dir(struct trace_array *tr)
9120 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9124 ret = event_trace_add_tracer(tr->dir, tr);
9126 tracefs_remove(tr->dir);
9128 init_tracer_tracefs(tr, tr->dir);
9129 __update_tracer_options(tr);
9134 static struct trace_array *trace_array_create(const char *name)
9136 struct trace_array *tr;
9140 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9142 return ERR_PTR(ret);
9144 tr->name = kstrdup(name, GFP_KERNEL);
9148 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9151 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9153 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9155 raw_spin_lock_init(&tr->start_lock);
9157 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9159 tr->current_trace = &nop_trace;
9161 INIT_LIST_HEAD(&tr->systems);
9162 INIT_LIST_HEAD(&tr->events);
9163 INIT_LIST_HEAD(&tr->hist_vars);
9164 INIT_LIST_HEAD(&tr->err_log);
9166 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9169 if (ftrace_allocate_ftrace_ops(tr) < 0)
9172 ftrace_init_trace_array(tr);
9174 init_trace_flags_index(tr);
9176 if (trace_instance_dir) {
9177 ret = trace_array_create_dir(tr);
9181 __trace_early_add_events(tr);
9183 list_add(&tr->list, &ftrace_trace_arrays);
9190 ftrace_free_ftrace_ops(tr);
9191 free_trace_buffers(tr);
9192 free_cpumask_var(tr->tracing_cpumask);
9196 return ERR_PTR(ret);
9199 static int instance_mkdir(const char *name)
9201 struct trace_array *tr;
9204 mutex_lock(&event_mutex);
9205 mutex_lock(&trace_types_lock);
9208 if (trace_array_find(name))
9211 tr = trace_array_create(name);
9213 ret = PTR_ERR_OR_ZERO(tr);
9216 mutex_unlock(&trace_types_lock);
9217 mutex_unlock(&event_mutex);
9222 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9223 * @name: The name of the trace array to be looked up/created.
* Returns a pointer to the trace array with the given name, or NULL if
* it cannot be created.
9228 * NOTE: This function increments the reference counter associated with the
9229 * trace array returned. This makes sure it cannot be freed while in use.
9230 * Use trace_array_put() once the trace array is no longer needed.
9231 * If the trace_array is to be freed, trace_array_destroy() needs to
9232 * be called after the trace_array_put(), or simply let user space delete
9233 * it from the tracefs instances directory. But until the
9234 * trace_array_put() is called, user space can not delete it.
9237 struct trace_array *trace_array_get_by_name(const char *name)
9239 struct trace_array *tr;
9241 mutex_lock(&event_mutex);
9242 mutex_lock(&trace_types_lock);
9244 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9245 if (tr->name && strcmp(tr->name, name) == 0)
9249 tr = trace_array_create(name);
9257 mutex_unlock(&trace_types_lock);
9258 mutex_unlock(&event_mutex);
9261 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
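/*
 * Illustrative in-kernel usage (a sketch; the instance name "example" is
 * hypothetical, see samples/ftrace/sample-trace-array.c for a complete
 * module):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example");
 *	if (!tr)
 *		return -ENODEV;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	(only if the instance should go away)
 */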
9263 static int __remove_instance(struct trace_array *tr)
9267 /* Reference counter for a newly created trace array = 1. */
9268 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9271 list_del(&tr->list);
9273 /* Disable all the flags that were enabled coming in */
9274 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9275 if ((1 << i) & ZEROED_TRACE_FLAGS)
9276 set_tracer_flag(tr, 1 << i, 0);
9279 tracing_set_nop(tr);
9280 clear_ftrace_function_probes(tr);
9281 event_trace_del_tracer(tr);
9282 ftrace_clear_pids(tr);
9283 ftrace_destroy_function_files(tr);
9284 tracefs_remove(tr->dir);
9285 free_percpu(tr->last_func_repeats);
9286 free_trace_buffers(tr);
9288 for (i = 0; i < tr->nr_topts; i++) {
9289 kfree(tr->topts[i].topts);
9293 free_cpumask_var(tr->tracing_cpumask);
9300 int trace_array_destroy(struct trace_array *this_tr)
9302 struct trace_array *tr;
9308 mutex_lock(&event_mutex);
9309 mutex_lock(&trace_types_lock);
9313 /* Making sure trace array exists before destroying it. */
9314 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9315 if (tr == this_tr) {
9316 ret = __remove_instance(tr);
9321 mutex_unlock(&trace_types_lock);
9322 mutex_unlock(&event_mutex);
9326 EXPORT_SYMBOL_GPL(trace_array_destroy);
9328 static int instance_rmdir(const char *name)
9330 struct trace_array *tr;
9333 mutex_lock(&event_mutex);
9334 mutex_lock(&trace_types_lock);
9337 tr = trace_array_find(name);
9339 ret = __remove_instance(tr);
9341 mutex_unlock(&trace_types_lock);
9342 mutex_unlock(&event_mutex);
9347 static __init void create_trace_instances(struct dentry *d_tracer)
9349 struct trace_array *tr;
9351 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9354 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9357 mutex_lock(&event_mutex);
9358 mutex_lock(&trace_types_lock);
9360 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9363 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9364 "Failed to create instance directory\n"))
9368 mutex_unlock(&trace_types_lock);
9369 mutex_unlock(&event_mutex);
9373 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9375 struct trace_event_file *file;
9378 trace_create_file("available_tracers", 0444, d_tracer,
9379 tr, &show_traces_fops);
9381 trace_create_file("current_tracer", 0644, d_tracer,
9382 tr, &set_tracer_fops);
9384 trace_create_file("tracing_cpumask", 0644, d_tracer,
9385 tr, &tracing_cpumask_fops);
9387 trace_create_file("trace_options", 0644, d_tracer,
9388 tr, &tracing_iter_fops);
9390 trace_create_file("trace", 0644, d_tracer,
9393 trace_create_file("trace_pipe", 0444, d_tracer,
9394 tr, &tracing_pipe_fops);
9396 trace_create_file("buffer_size_kb", 0644, d_tracer,
9397 tr, &tracing_entries_fops);
9399 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9400 tr, &tracing_total_entries_fops);
9402 trace_create_file("free_buffer", 0200, d_tracer,
9403 tr, &tracing_free_buffer_fops);
9405 trace_create_file("trace_marker", 0220, d_tracer,
9406 tr, &tracing_mark_fops);
9408 file = __find_event_file(tr, "ftrace", "print");
9409 if (file && file->dir)
9410 trace_create_file("trigger", 0644, file->dir, file,
9411 &event_trigger_fops);
9412 tr->trace_marker_file = file;
9414 trace_create_file("trace_marker_raw", 0220, d_tracer,
9415 tr, &tracing_mark_raw_fops);
9417 trace_create_file("trace_clock", 0644, d_tracer, tr,
9420 trace_create_file("tracing_on", 0644, d_tracer,
9421 tr, &rb_simple_fops);
9423 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9424 &trace_time_stamp_mode_fops);
9426 tr->buffer_percent = 50;
9428 trace_create_file("buffer_percent", 0444, d_tracer,
9429 tr, &buffer_percent_fops);
9431 create_trace_options_dir(tr);
9433 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9434 trace_create_maxlat_file(tr, d_tracer);
9437 if (ftrace_create_function_files(tr, d_tracer))
9438 MEM_FAIL(1, "Could not allocate function filter files");
9440 #ifdef CONFIG_TRACER_SNAPSHOT
9441 trace_create_file("snapshot", 0644, d_tracer,
9442 tr, &snapshot_fops);
9445 trace_create_file("error_log", 0644, d_tracer,
9446 tr, &tracing_err_log_fops);
9448 for_each_tracing_cpu(cpu)
9449 tracing_init_tracefs_percpu(tr, cpu);
9451 ftrace_init_tracefs(tr, d_tracer);
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9456 struct vfsmount *mnt;
9457 struct file_system_type *type;
9460 * To maintain backward compatibility for tools that mount
9461 * debugfs to get to the tracing facility, tracefs is automatically
9462 * mounted to the debugfs/tracing directory.
9464 type = get_fs_type("tracefs");
9467 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9468 put_filesystem(type);
9477 * tracing_init_dentry - initialize top level trace array
9479 * This is called when creating files or directories in the tracing
9480 * directory. It is called via fs_initcall() by any of the boot up code
* and returns zero once the top level tracing directory has been set up.
9483 int tracing_init_dentry(void)
9485 struct trace_array *tr = &global_trace;
9487 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9488 pr_warn("Tracing disabled due to lockdown\n");
9492 /* The top level trace array uses NULL as parent */
9496 if (WARN_ON(!tracefs_initialized()))
9500 * As there may still be users that expect the tracing
9501 * files to exist in debugfs/tracing, we must automount
9502 * the tracefs file system there, so older tools still
9503 * work with the newer kernel.
9505 tr->dir = debugfs_create_automount("tracing", NULL,
9506 trace_automount, NULL);
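/*
 * In practice this means both of the following work, whether or not
 * tracefs was mounted explicitly (paths assume the usual mount points):
 *
 *	# mount -t tracefs nodev /sys/kernel/tracing
 *	# ls /sys/kernel/tracing
 *	# ls /sys/kernel/debug/tracing		(automounted for older tools)
 */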
9511 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9512 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9514 static struct workqueue_struct *eval_map_wq __initdata;
9515 static struct work_struct eval_map_work __initdata;
9517 static void __init eval_map_work_func(struct work_struct *work)
9521 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9522 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9525 static int __init trace_eval_init(void)
9527 INIT_WORK(&eval_map_work, eval_map_work_func);
9529 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9531 pr_err("Unable to allocate eval_map_wq\n");
9533 eval_map_work_func(&eval_map_work);
9537 queue_work(eval_map_wq, &eval_map_work);
9541 static int __init trace_eval_sync(void)
9543 /* Make sure the eval map updates are finished */
9545 destroy_workqueue(eval_map_wq);
9549 late_initcall_sync(trace_eval_sync);
9552 #ifdef CONFIG_MODULES
9553 static void trace_module_add_evals(struct module *mod)
9555 if (!mod->num_trace_evals)
9559 * Modules with bad taint do not have events created, do
9560 * not bother with enums either.
9562 if (trace_module_has_bad_taint(mod))
9565 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9568 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9569 static void trace_module_remove_evals(struct module *mod)
9571 union trace_eval_map_item *map;
9572 union trace_eval_map_item **last = &trace_eval_maps;
9574 if (!mod->num_trace_evals)
9577 mutex_lock(&trace_eval_mutex);
9579 map = trace_eval_maps;
9582 if (map->head.mod == mod)
9584 map = trace_eval_jmp_to_tail(map);
9585 last = &map->tail.next;
9586 map = map->tail.next;
9591 *last = trace_eval_jmp_to_tail(map)->tail.next;
9594 mutex_unlock(&trace_eval_mutex);
9597 static inline void trace_module_remove_evals(struct module *mod) { }
9598 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9600 static int trace_module_notify(struct notifier_block *self,
9601 unsigned long val, void *data)
9603 struct module *mod = data;
9606 case MODULE_STATE_COMING:
9607 trace_module_add_evals(mod);
9609 case MODULE_STATE_GOING:
9610 trace_module_remove_evals(mod);
9617 static struct notifier_block trace_module_nb = {
9618 .notifier_call = trace_module_notify,
9621 #endif /* CONFIG_MODULES */
9623 static __init int tracer_init_tracefs(void)
9627 trace_access_lock_init();
9629 ret = tracing_init_dentry();
9635 init_tracer_tracefs(&global_trace, NULL);
9636 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9638 trace_create_file("tracing_thresh", 0644, NULL,
9639 &global_trace, &tracing_thresh_fops);
9641 trace_create_file("README", 0444, NULL,
9642 NULL, &tracing_readme_fops);
9644 trace_create_file("saved_cmdlines", 0444, NULL,
9645 NULL, &tracing_saved_cmdlines_fops);
9647 trace_create_file("saved_cmdlines_size", 0644, NULL,
9648 NULL, &tracing_saved_cmdlines_size_fops);
9650 trace_create_file("saved_tgids", 0444, NULL,
9651 NULL, &tracing_saved_tgids_fops);
9655 trace_create_eval_file(NULL);
9657 #ifdef CONFIG_MODULES
9658 register_module_notifier(&trace_module_nb);
9661 #ifdef CONFIG_DYNAMIC_FTRACE
9662 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9663 NULL, &tracing_dyn_info_fops);
9666 create_trace_instances(NULL);
9668 update_tracer_options(&global_trace);
9673 fs_initcall(tracer_init_tracefs);
9675 static int trace_panic_handler(struct notifier_block *this,
9676 unsigned long event, void *unused)
9678 if (ftrace_dump_on_oops)
9679 ftrace_dump(ftrace_dump_on_oops);
9683 static struct notifier_block trace_panic_notifier = {
9684 .notifier_call = trace_panic_handler,
9686 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9689 static int trace_die_handler(struct notifier_block *self,
9695 if (ftrace_dump_on_oops)
9696 ftrace_dump(ftrace_dump_on_oops);
9704 static struct notifier_block trace_die_notifier = {
9705 .notifier_call = trace_die_handler,
* printk is limited to a max of 1024 bytes; we really don't need it that big.
9711 * Nothing should be printing 1000 characters anyway.
9713 #define TRACE_MAX_PRINT 1000
9716 * Define here KERN_TRACE so that we have one place to modify
* it if we decide to change what log level the ftrace dump should be at.
9720 #define KERN_TRACE KERN_EMERG
9723 trace_printk_seq(struct trace_seq *s)
9725 /* Probably should print a warning here. */
9726 if (s->seq.len >= TRACE_MAX_PRINT)
9727 s->seq.len = TRACE_MAX_PRINT;
9730 * More paranoid code. Although the buffer size is set to
9731 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9732 * an extra layer of protection.
9734 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9735 s->seq.len = s->seq.size - 1;
/* Should be zero terminated, but we are paranoid. */
9738 s->buffer[s->seq.len] = 0;
9740 printk(KERN_TRACE "%s", s->buffer);
9745 void trace_init_global_iter(struct trace_iterator *iter)
9747 iter->tr = &global_trace;
9748 iter->trace = iter->tr->current_trace;
9749 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9750 iter->array_buffer = &global_trace.array_buffer;
9752 if (iter->trace && iter->trace->open)
9753 iter->trace->open(iter);
9755 /* Annotate start of buffers if we had overruns */
9756 if (ring_buffer_overruns(iter->array_buffer->buffer))
9757 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9759 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9760 if (trace_clocks[iter->tr->clock_id].in_ns)
9761 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9764 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9766 /* use static because iter can be a bit big for the stack */
9767 static struct trace_iterator iter;
9768 static atomic_t dump_running;
9769 struct trace_array *tr = &global_trace;
9770 unsigned int old_userobj;
9771 unsigned long flags;
9774 /* Only allow one dump user at a time. */
9775 if (atomic_inc_return(&dump_running) != 1) {
9776 atomic_dec(&dump_running);
9781 * Always turn off tracing when we dump.
9782 * We don't need to show trace output of what happens
9783 * between multiple crashes.
9785 * If the user does a sysrq-z, then they can re-enable
9786 * tracing with echo 1 > tracing_on.
9790 local_irq_save(flags);
9791 printk_nmi_direct_enter();
9793 /* Simulate the iterator */
9794 trace_init_global_iter(&iter);
9795 /* Can not use kmalloc for iter.temp and iter.fmt */
9796 iter.temp = static_temp_buf;
9797 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9798 iter.fmt = static_fmt_buf;
9799 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9801 for_each_tracing_cpu(cpu) {
9802 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9805 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9807 /* don't look at user memory in panic mode */
9808 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9810 switch (oops_dump_mode) {
9812 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9815 iter.cpu_file = raw_smp_processor_id();
9820 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9821 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9824 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9826 /* Did function tracer already get disabled? */
9827 if (ftrace_is_dead()) {
9828 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9829 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9833 * We need to stop all tracing on all CPUS to read
9834 * the next buffer. This is a bit expensive, but is
* not done often. We read everything we can,
9836 * and then release the locks again.
9839 while (!trace_empty(&iter)) {
9842 printk(KERN_TRACE "---------------------------------\n");
9846 trace_iterator_reset(&iter);
9847 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9849 if (trace_find_next_entry_inc(&iter) != NULL) {
9852 ret = print_trace_line(&iter);
9853 if (ret != TRACE_TYPE_NO_CONSUME)
9854 trace_consume(&iter);
9856 touch_nmi_watchdog();
9858 trace_printk_seq(&iter.seq);
9862 printk(KERN_TRACE " (ftrace buffer empty)\n");
9864 printk(KERN_TRACE "---------------------------------\n");
9867 tr->trace_flags |= old_userobj;
9869 for_each_tracing_cpu(cpu) {
9870 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9872 atomic_dec(&dump_running);
9873 printk_nmi_direct_exit();
9874 local_irq_restore(flags);
9876 EXPORT_SYMBOL_GPL(ftrace_dump);
9878 #define WRITE_BUFSIZE 4096
9880 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9881 size_t count, loff_t *ppos,
9882 int (*createfn)(const char *))
9884 char *kbuf, *buf, *tmp;
9889 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9893 while (done < count) {
9894 size = count - done;
9896 if (size >= WRITE_BUFSIZE)
9897 size = WRITE_BUFSIZE - 1;
9899 if (copy_from_user(kbuf, buffer + done, size)) {
9906 tmp = strchr(buf, '\n');
9909 size = tmp - buf + 1;
9912 if (done + size < count) {
9915 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9916 pr_warn("Line length is too long: Should be less than %d\n",
9924 /* Remove comments */
9925 tmp = strchr(buf, '#');
9930 ret = createfn(buf);
9935 } while (done < count);
9945 __init static int tracer_alloc_buffers(void)
9951 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9952 pr_warn("Tracing disabled due to lockdown\n");
9957 * Make sure we don't accidentally add more trace options
9958 * than we have bits for.
9960 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9962 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9965 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9966 goto out_free_buffer_mask;
9968 /* Only allocate trace_printk buffers if a trace_printk exists */
9969 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9970 /* Must be called before global_trace.buffer is allocated */
9971 trace_printk_init_buffers();
9973 /* To save memory, keep the ring buffer size to its minimum */
9974 if (ring_buffer_expanded)
9975 ring_buf_size = trace_buf_size;
9979 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9980 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9982 raw_spin_lock_init(&global_trace.start_lock);
* The prepare callbacks allocate some memory for the ring buffer. We
9986 * don't free the buffer if the CPU goes down. If we were to free
9987 * the buffer, then the user would lose any trace that was in the
9988 * buffer. The memory will be removed once the "instance" is removed.
9990 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9991 "trace/RB:preapre", trace_rb_cpu_prepare,
9994 goto out_free_cpumask;
9995 /* Used for event triggers */
9997 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9999 goto out_rm_hp_state;
10001 if (trace_create_savedcmd() < 0)
10002 goto out_free_temp_buffer;
10004 /* TODO: make the number of buffers hot pluggable with CPUS */
10005 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10006 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10007 goto out_free_savedcmd;
10010 if (global_trace.buffer_disabled)
10013 if (trace_boot_clock) {
10014 ret = tracing_set_clock(&global_trace, trace_boot_clock);
10016 pr_warn("Trace clock %s not defined, going back to default\n",
10021 * register_tracer() might reference current_trace, so it
10022 * needs to be set before we register anything. This is
10023 * just a bootstrap of current_trace anyway.
10025 global_trace.current_trace = &nop_trace;
10027 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10029 ftrace_init_global_array_ops(&global_trace);
10031 init_trace_flags_index(&global_trace);
10033 register_tracer(&nop_trace);
10035 /* Function tracing may start here (via kernel command line) */
10036 init_function_trace();
10038 /* All seems OK, enable tracing */
10039 tracing_disabled = 0;
10041 atomic_notifier_chain_register(&panic_notifier_list,
10042 &trace_panic_notifier);
10044 register_die_notifier(&trace_die_notifier);
10046 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10048 INIT_LIST_HEAD(&global_trace.systems);
10049 INIT_LIST_HEAD(&global_trace.events);
10050 INIT_LIST_HEAD(&global_trace.hist_vars);
10051 INIT_LIST_HEAD(&global_trace.err_log);
10052 list_add(&global_trace.list, &ftrace_trace_arrays);
10054 apply_trace_boot_options();
10056 register_snapshot_cmd();
10063 free_saved_cmdlines_buffer(savedcmd);
10064 out_free_temp_buffer:
10065 ring_buffer_free(temp_buffer);
10067 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10069 free_cpumask_var(global_trace.tracing_cpumask);
10070 out_free_buffer_mask:
10071 free_cpumask_var(tracing_buffer_mask);
10076 void __init early_trace_init(void)
10078 if (tracepoint_printk) {
10079 tracepoint_print_iter =
10080 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10081 if (MEM_FAIL(!tracepoint_print_iter,
10082 "Failed to allocate trace iterator\n"))
10083 tracepoint_printk = 0;
10085 static_key_enable(&tracepoint_printk_key.key);
10087 tracer_alloc_buffers();
10090 void __init trace_init(void)
10092 trace_event_init();
10095 __init static void clear_boot_tracer(void)
* The default boot-up tracer name points into an init section buffer.
* This function is called at late_initcall time. If we did not
10100 * find the boot tracer, then clear it out, to prevent
10101 * later registration from accessing the buffer that is
10102 * about to be freed.
10104 if (!default_bootup_tracer)
10107 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10108 default_bootup_tracer);
10109 default_bootup_tracer = NULL;
10112 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10113 __init static void tracing_set_default_clock(void)
10115 /* sched_clock_stable() is determined in late_initcall */
10116 if (!trace_boot_clock && !sched_clock_stable()) {
10117 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10118 pr_warn("Can not set tracing clock due to lockdown\n");
10122 printk(KERN_WARNING
10123 "Unstable clock detected, switching default tracing clock to \"global\"\n"
10124 "If you want to keep using the local clock, then add:\n"
10125 " \"trace_clock=local\"\n"
10126 "on the kernel command line\n");
10127 tracing_set_clock(&global_trace, "global");
10131 static inline void tracing_set_default_clock(void) { }
10134 __init static int late_trace_init(void)
10136 if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10137 static_key_disable(&tracepoint_printk_key.key);
10138 tracepoint_printk = 0;
10141 tracing_set_default_clock();
10142 clear_boot_tracer();
10146 late_initcall_sync(late_trace_init);