// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, but concurrent insertions into the
 * ring buffer (such as trace_printk) could occur at the same time,
 * giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
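
/*
 * Illustrative kernel command line exercising the boot parameters
 * handled above (the option values are example choices, not defaults):
 *
 *	ftrace=function trace_options=stacktrace trace_clock=global \
 *	ftrace_dump_on_oops=orig_cpu traceoff_on_warning alloc_snapshot \
 *	tp_printk
 */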
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
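
/*
 * Illustrative sketch (not code used by this file): a client of the
 * export API supplies a write() callback and flags, then registers the
 * struct. The names my_write/my_export are hypothetical.
 *
 *	static void my_write(struct trace_export *export, const void *entry,
 *			     unsigned int size)
 *	{
 *		... forward the raw trace entry to some sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_write,
 *		.flags = TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 */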
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
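
/*
 * Illustrative sketch: the three helpers above are designed to slot
 * straight into seq_file operations. A hypothetical user keeping its
 * pids in my_pid_list could do:
 *
 *	static void *my_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start = my_start,
 *		.next  = my_next,
 *		.stop  = my_stop,
 *		.show  = trace_pid_show,
 *	};
 */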
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
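
/*
 * For example, writing "1 2 3" through the helper above yields a fresh
 * pid list containing 1, 2 and 3 (replacing any previous list), while
 * writing an empty string clears the filter and hands back a NULL list.
 */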
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}
/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
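
/*
 * Illustrative pairing, as used by the trace file readers later in this
 * file (cpu is either a CPU id or RING_BUFFER_ALL_CPUS):
 *
 *	trace_access_lock(cpu);
 *	... consume events from the cpu buffer ...
 *	trace_access_unlock(cpu);
 */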
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}
void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
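
/*
 * Illustrative sketch (my_condition() is a hypothetical placeholder):
 * a debugging change can freeze the interesting moment while tracing
 * continues into the live buffer:
 *
 *	tracing_alloc_snapshot();
 *	...
 *	if (my_condition())
 *		tracing_snapshot();
 */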
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1161 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1162 * @tr: The tracing instance
1164 * When the user enables a conditional snapshot using
1165 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1166 * with the snapshot. This accessor is used to retrieve it.
1168 * Should not be called from cond_snapshot.update(), since it takes
1169 * the tr->max_lock lock, which the code calling
1170 * cond_snapshot.update() has already done.
1172 * Returns the cond_data associated with the trace array's snapshot.
1174 void *tracing_cond_snapshot_data(struct trace_array *tr)
1176 void *cond_data = NULL;
1178 arch_spin_lock(&tr->max_lock);
1180 if (tr->cond_snapshot)
1181 cond_data = tr->cond_snapshot->cond_data;
1183 arch_spin_unlock(&tr->max_lock);
1187 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
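
/*
 * Illustrative sketch of the update() contract (hypothetical names):
 * snapshot only once a counter passed in as cond_data crosses a
 * threshold. The swap happens only when my_update() returns true.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return *(unsigned long *)cond_data > 100;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_counter, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_counter);
 */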
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
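
/*
 * Illustrative use: sprinkling tracing_off() at a suspected failure
 * point freezes the ring buffer contents leading up to it, e.g.:
 *
 *	if (status == my_bad_state)
 *		tracing_off();
 *
 * (my_bad_state is a hypothetical placeholder.)
 */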
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
unsigned long __read_mostly	tracing_thresh;
static const struct file_operations tracing_max_lat_fops;
#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, &tr->max_latency,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}
#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
	|| defined(CONFIG_OSNOISE_TRACER)

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)

#else
#define trace_create_maxlat_file(tr, d_tracer)	 do { } while (0)
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
				full);
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	if (!tracing_is_on()) {
		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
			type->name);
		return 0;
	}

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			   type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	disable_tracing_selftest("running a tracer");

 out_unlock:
	return ret;
}
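
/*
 * Illustrative sketch of a minimal plugin (hypothetical names): a
 * tracer needs little more than a name plus init/reset callbacks
 * before it can be handed to register_tracer() from __init code:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	register_tracer(&my_tracer);
 */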
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
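
/*
 * Fixed-size cache of recently seen task comms: map_pid_to_cmdline maps
 * a (masked) pid to a slot in saved_cmdlines, and map_cmdline_to_pid
 * maps the slot back to the pid that owns it so stale entries can be
 * detected at lookup time.
 */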
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
static void tracing_stop_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned tpid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tpid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		savedcmd->map_pid_to_cmdline[tpid] = idx;
		savedcmd->cmdline_idx = idx;
	}

	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;
	int tpid;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	tpid = pid & (PID_MAX_DEFAULT - 1);
	map = savedcmd->map_pid_to_cmdline[tpid];
	if (map != NO_CMDLINE_MAP) {
		tpid = savedcmd->map_cmdline_to_pid[map];
		if (tpid == pid) {
			strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
			return;
		}
	}
	strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
static int *trace_find_tgid_ptr(int pid)
{
	/*
	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
	 * if we observe a non-NULL tgid_map then we also observe the correct
	 * tgid_map_max.
	 */
	int *map = smp_load_acquire(&tgid_map);

	if (unlikely(!map || pid > tgid_map_max))
		return NULL;

	return &map[pid];
}

int trace_find_tgid(int pid)
{
	int *ptr = trace_find_tgid_ptr(pid);

	return ptr ? *ptr : 0;
}
static int trace_save_tgid(struct task_struct *tsk)
{
	int *ptr;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	ptr = trace_find_tgid_ptr(tsk->pid);
	if (!ptr)
		return 0;

	*ptr = tsk->tgid;
	return 1;
}

static bool tracing_record_taskinfo_skip(int flags)
{
	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
		return true;
	if (!__this_cpu_read(trace_taskinfo_save))
		return true;
	return false;
}

/**
 * tracing_record_taskinfo - record the task info of a task
 *
 * @task:  task to record
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo(struct task_struct *task, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/**
 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 *
 * @prev: previous task during sched_switch
 * @next: next task during sched_switch
 * @flags: TRACE_RECORD_CMDLINE for recording comm
 *         TRACE_RECORD_TGID for recording tgid
 */
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags)
{
	bool done;

	if (tracing_record_taskinfo_skip(flags))
		return;

	/*
	 * Record as much task information as possible. If some fail, continue
	 * to try to record the others.
	 */
	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);

	/* If recording any information failed, retry again soon. */
	if (!done)
		return;

	__this_cpu_write(trace_taskinfo_save, false);
}

/* Helpers to record a specific task information */
void tracing_record_cmdline(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
}

void tracing_record_tgid(struct task_struct *task)
{
	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
/*
 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 * simplifies those functions and keeps them in sync.
 */
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);

static unsigned short migration_disable_value(void)
{
#if defined(CONFIG_SMP)
	return current->migration_disabled;
#else
	return 0;
#endif
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
	unsigned int trace_flags = irqs_status;
	unsigned int pc;

	pc = preempt_count();

	if (pc & NMI_MASK)
		trace_flags |= TRACE_FLAG_NMI;
	if (pc & HARDIRQ_MASK)
		trace_flags |= TRACE_FLAG_HARDIRQ;
	if (in_serving_softirq())
		trace_flags |= TRACE_FLAG_SOFTIRQ;

	if (tif_need_resched())
		trace_flags |= TRACE_FLAG_NEED_RESCHED;
	if (test_preempt_need_resched())
		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
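
/*
 * Worked example (illustrative only): with a preempt depth of 2, hard
 * interrupt context, and one level of migrate-disable, the packed word
 * returned above is laid out as:
 *
 *	bits 16..31: trace flags        (here TRACE_FLAG_HARDIRQ)
 *	bits  4..7:  migrate-disable depth, clamped to 0xf (here 1)
 *	bits  0..3:  preempt depth, clamped to 0xf (here 2)
 *
 * i.e. (TRACE_FLAG_HARDIRQ << 16) | (1 << 4) | 2.
 */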
struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;
/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    __this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}
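
/*
 * Usage sketch (illustrative, assuming event_mutex is held as the
 * WARN_ON_ONCE()s above require): the enable/disable pair is reference
 * counted, so each successful enable must be balanced by a disable:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();   // allocate per-CPU temp pages
 *	...				 // install an event filter
 *	trace_buffered_event_disable();  // drop ref; frees pages at zero
 *	mutex_unlock(&event_mutex);
 */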
static struct trace_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *entry;
	struct trace_array *tr = trace_file->tr;
	int val;

	*current_rb = tr->array_buffer.buffer;

	if (!tr->no_filter_buffering_ref &&
	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/*
		 * Filtering is on, so try to use the per cpu buffer first.
		 * This buffer will simulate a ring_buffer_event,
		 * where the type_len is zero and the array[0] will
		 * hold the full length.
		 * (see include/linux/ring_buffer.h for details on
		 *  how the ring_buffer_event is structured).
		 *
		 * Using a temp buffer during filtering and copying it
		 * on a matched filter is quicker than writing directly
		 * into the ring buffer and then discarding it when
		 * it doesn't match. That is because the discard
		 * requires several atomic operations to get right.
		 * Copying on match and doing nothing on a failed match
		 * is still quicker than no copy on match, but having
		 * to discard out of the ring buffer on a failed match.
		 */
		int max_len = PAGE_SIZE - struct_size(entry, array, 1);

		val = this_cpu_inc_return(trace_buffered_event_cnt);

		/*
		 * Preemption is disabled, but interrupts and NMIs
		 * can still come in now. If that happens after
		 * the above increment, then it will have to go
		 * back to the old method of allocating the event
		 * on the ring buffer, and if the filter fails, it
		 * will have to call ring_buffer_discard_commit()
		 * to remove it.
		 *
		 * Need to also check the unlikely case that the
		 * length is bigger than the temp buffer size.
		 * If that happens, then the reserve is pretty much
		 * guaranteed to fail, as the ring buffer currently
		 * only allows events less than a page. But that may
		 * change in the future, so let the ring buffer reserve
		 * handle the failure in that case.
		 */
		if (val == 1 && likely(len <= max_len)) {
			trace_event_setup(entry, type, trace_ctx);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
					    trace_ctx);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
						    trace_ctx);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
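
/*
 * Usage sketch (illustrative only): the generated trace_event_raw_event_*()
 * handlers pair this reserve with trace_event_buffer_commit(). Roughly:
 *
 *	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
 *				trace_file, event_type, sizeof(*entry),
 *				fbuffer.trace_ctx);
 *	if (fbuffer.event) {
 *		entry = ring_buffer_event_data(fbuffer.event);
 *		// ... fill in the event-specific fields of entry ...
 *		trace_event_buffer_commit(&fbuffer);
 *	}
 *
 * 'event_type' and 'entry' stand in for the event-specific values.
 */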
static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event_file *file;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	file = fbuffer->trace_file;
	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, fbuffer->entry)))
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	enum event_trigger_type tt = ETT_NONE;
	struct trace_event_file *file = fbuffer->trace_file;

	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
			fbuffer->entry, &tt))
		goto discard;

	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	if (static_branch_unlikely(&trace_event_exports_enabled))
		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);

	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);

discard:
	if (tt)
		event_triggers_post_call(file, tt);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
	ftrace_trace_userstack(tr, buffer, trace_ctx);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
void
trace_function(struct trace_array *tr, unsigned long ip, unsigned long
	       parent_ip, unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_function;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&trace_function_exports_enabled))
			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
		__buffer_unlock_commit(buffer, event);
	}
}

#ifdef CONFIG_STACKTRACE

/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING	4

#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)

struct ftrace_stack {
	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
};

struct ftrace_stacks {
	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
};

static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	unsigned int size, nr_entries;
	struct ftrace_stack *fstack;
	struct stack_entry *entry;
	int stackidx;

	/*
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		skip++;
#endif

	preempt_disable_notrace();

	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;

	/* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
		goto out;

	/*
	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
	 * interrupt will either see the value pre increment or post
	 * increment. If the interrupt happens pre increment it will have
	 * restored the counter when it returns.  We just need a barrier to
	 * keep gcc from moving things around.
	 */
	barrier();

	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
	size = ARRAY_SIZE(fstack->calls);

	if (regs) {
		nr_entries = stack_trace_save_regs(regs, fstack->calls,
						   size, skip);
	} else {
		nr_entries = stack_trace_save(fstack->calls, size, skip);
	}

	size = nr_entries * sizeof(unsigned long);
	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
				    (sizeof(*entry) - sizeof(entry->caller)) + size,
				    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memcpy(&entry->caller, fstack->calls, size);
	entry->size = nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
}

void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
		   int skip)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
	rcu_irq_exit_irqson();
}
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	if (tracing_disabled || tracing_selftest_running)
		return;

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.array_buffer.buffer,
			     tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
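
/*
 * Example (illustrative): from a context where tracing is active, dump the
 * current kernel stack into the trace buffer with no extra frames skipped:
 *
 *	trace_dump_stack(0);
 *
 * A helper calling this on someone else's behalf would pass 1 so that its
 * own frame is skipped as well.
 */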
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
		       struct trace_buffer *buffer, unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;

	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), trace_ctx);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* CONFIG_STACKTRACE */
static inline void
func_repeats_set_delta_ts(struct func_repeats_entry *entry,
			  unsigned long long delta)
{
	entry->bottom_delta_ts = delta & U32_MAX;
	entry->top_delta_ts = (delta >> 32);
}

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct func_repeats_entry *entry;
	struct ring_buffer_event *event;
	u64 delta;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
					    sizeof(*entry), trace_ctx);
	if (!event)
		return;

	delta = ring_buffer_event_time_stamp(buffer, event) -
		last_info->ts_last_call;

	entry = ring_buffer_event_data(event);
	entry->ip = last_info->ip;
	entry->parent_ip = last_info->parent_ip;
	entry->count = last_info->count;
	func_repeats_set_delta_ts(entry, delta);

	__buffer_unlock_commit(buffer, event);
}
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
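
/*
 * Usage sketch for the nesting buffers above (illustrative only): callers
 * pair get_trace_buf() with put_trace_buf() and must tolerate NULL when
 * nested more than four deep (normal/softirq/irq/NMI):
 *
 *	char *buf = get_trace_buf();
 *	if (buf) {
 *		// ... format up to TRACE_BUF_SIZE bytes into buf ...
 *		put_trace_buf();
 *	}
 *
 * Preemption must already be disabled, as in trace_vbprintk() below.
 */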
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.array_buffer.buffer)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
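
/*
 * Example (illustrative): a debugging site that uses trace_printk() pulls
 * this initialization in automatically, and the banner above is printed
 * once. 'val' below is a hypothetical local variable at the call site:
 *
 *	trace_printk("reached %s with val=%d\n", __func__, val);
 */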
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:    The address of the caller
 * @fmt:   The string format to write to the buffer
 * @args:  Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;
	int len = 0, size;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->fmt			= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
out_put:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
__printf(3, 0)
static int
__trace_array_vprintk(struct trace_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size;
	struct print_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}
/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, it has the right to
 * printk strings into its tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as writing into the top level
 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should never
 * be incorporated in normal use.
 *
 * trace_array_printk() can be used, as it will not add noise to the
 * top level tracing buffer.
 *
 * Note, trace_array_init_printk() must be called on @tr before this
 * can be used.
 */
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return 0;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_printk);
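
/*
 * Usage sketch (illustrative; 'my_tr' is a hypothetical instance obtained
 * via trace_array_get_by_name() or similar, and 'id' a caller variable):
 *
 *	if (!trace_array_init_printk(my_tr))
 *		trace_array_printk(my_tr, _THIS_IP_,
 *				   "widget %d ready\n", id);
 *
 * Output goes only to the instance buffer, never the top-level buffer.
 */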
/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, they are OK to
 * have in the kernel (unlike trace_printk()). This needs to be called
 * before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
{
	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return -EINVAL;

	return alloc_percpu_trace_buffer();
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);

__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_iter_advance(buf_iter);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter) {
		event = ring_buffer_iter_peek(buf_iter, ts);
		if (lost_events)
			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
				(unsigned long)-1 : 0;
	} else {
		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
					 lost_events);
	}

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct trace_buffer *buffer = iter->array_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
#define STATIC_FMT_BUF_SIZE	128
static char static_fmt_buf[STATIC_FMT_BUF_SIZE];

static char *trace_iter_expand_format(struct trace_iterator *iter)
{
	char *tmp;

	/*
	 * iter->tr is NULL when used with tp_printk, which makes
	 * this get called where it is not safe to call krealloc().
	 */
	if (!iter->tr || iter->fmt == static_fmt_buf)
		return NULL;

	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
		       GFP_KERNEL);
	if (tmp) {
		iter->fmt_size += STATIC_FMT_BUF_SIZE;
		iter->fmt = tmp;
	}

	return tmp;
}

/* Returns true if the string is safe to dereference from an event */
static bool trace_safe_str(struct trace_iterator *iter, const char *str)
{
	unsigned long addr = (unsigned long)str;
	struct trace_event *trace_event;
	struct trace_event_call *event;

	/* OK if part of the event data */
	if ((addr >= (unsigned long)iter->ent) &&
	    (addr < (unsigned long)iter->ent + iter->ent_size))
		return true;

	/* OK if part of the temp seq buffer */
	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
	    (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
		return true;

	/* Core rodata can not be freed */
	if (is_kernel_rodata(addr))
		return true;

	if (trace_is_tracepoint_string(str))
		return true;

	/*
	 * Now this could be a module event, referencing core module
	 * data, which is OK.
	 */
	if (!iter->ent)
		return false;

	trace_event = ftrace_find_event(iter->ent->type);
	if (!trace_event)
		return false;

	event = container_of(trace_event, struct trace_event_call, event);
	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
		return false;

	/* Would rather have rodata, but this will suffice */
	if (within_module_core(addr, event->module))
		return true;

	return false;
}

static const char *show_buffer(struct trace_seq *s)
{
	struct seq_buf *seq = &s->seq;

	seq_buf_terminate(seq);

	return seq->buffer;
}
static DEFINE_STATIC_KEY_FALSE(trace_no_verify);

static int test_can_verify_check(const char *fmt, ...)
{
	char buf[16];
	va_list ap;
	int ret;

	/*
	 * The verifier relies on vsnprintf() modifying the va_list that is
	 * passed to it, where it is sent as a reference. Some architectures
	 * (like x86_32) pass it by value, which means that vsnprintf()
	 * does not modify the va_list passed to it, and the verifier
	 * would then need to be able to understand all the values that
	 * vsnprintf can use. If it is passed by value, then the verifier
	 * is disabled.
	 */
	va_start(ap, fmt);
	vsnprintf(buf, 16, "%d", ap);
	ret = va_arg(ap, int);
	va_end(ap);

	return ret;
}

static void test_can_verify(void)
{
	if (!test_can_verify_check("%d %d", 0, 1)) {
		pr_info("trace event string verifier disabled\n");
		static_branch_inc(&trace_no_verify);
	}
}
/**
 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
 * @iter: The iterator that holds the seq buffer and the event being printed
 * @fmt: The format used to print the event
 * @ap: The va_list holding the data to print from @fmt.
 *
 * This writes the data into the @iter->seq buffer using the data from
 * @fmt and @ap. If the format has a %s, then the source of the string
 * is examined to make sure it is safe to print, otherwise it will
 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
 * pointer.
 */
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
			 va_list ap)
{
	const char *p = fmt;
	const char *str;
	int i, j;

	if (WARN_ON_ONCE(!fmt))
		return;

	if (static_branch_unlikely(&trace_no_verify))
		goto print;

	/* Don't bother checking when doing a ftrace_dump() */
	if (iter->fmt == static_fmt_buf)
		goto print;

	while (*p) {
		bool star = false;
		int len = 0;

		j = 0;

		/* We only care about %s and variants */
		for (i = 0; p[i]; i++) {
			if (i + 1 >= iter->fmt_size) {
				/*
				 * If we can't expand the copy buffer,
				 * just print it.
				 */
				if (!trace_iter_expand_format(iter))
					goto print;
			}

			if (p[i] == '\\' && p[i+1]) {
				i++;
				continue;
			}
			if (p[i] == '%') {
				/* Need to test cases like %08.*s */
				for (j = 1; p[i+j]; j++) {
					if (isdigit(p[i+j]) ||
					    p[i+j] == '.')
						continue;
					if (p[i+j] == '*') {
						star = true;
						continue;
					}
					break;
				}
				if (p[i+j] == 's')
					break;
				star = false;
			}
			j = 0;
		}
		/* If no %s found then just print normally */
		if (!p[i])
			break;

		/* Copy up to the %s, and print that */
		strncpy(iter->fmt, p, i);
		iter->fmt[i] = '\0';
		trace_seq_vprintf(&iter->seq, iter->fmt, ap);

		if (star)
			len = va_arg(ap, int);

		/* The ap now points to the string data of the %s */
		str = va_arg(ap, const char *);

		/*
		 * If you hit this warning, it is likely that the
		 * trace event in question used %s on a string that
		 * was saved at the time of the event, but may not be
		 * around when the trace is read. Use __string(),
		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
		 * instead. See samples/trace_events/trace-events-sample.h
		 * for reference.
		 */
		if (WARN_ONCE(!trace_safe_str(iter, str),
			      "fmt: '%s' current_buffer: '%s'",
			      fmt, show_buffer(&iter->seq))) {
			int ret;

			/* Try to safely read the string */
			if (star) {
				if (len + 1 > iter->fmt_size)
					len = iter->fmt_size - 1;
				if (len < 0)
					len = 0;
				ret = copy_from_kernel_nofault(iter->fmt, str, len);
				iter->fmt[len] = 0;
				star = false;
			} else {
				ret = strncpy_from_kernel_nofault(iter->fmt, str,
								  iter->fmt_size);
			}
			if (ret < 0)
				trace_seq_printf(&iter->seq, "(0x%px)", str);
			else
				trace_seq_printf(&iter->seq, "(0x%px:%s)",
						 str, iter->fmt);
			str = "[UNSAFE-MEMORY]";
			strcpy(iter->fmt, "%s");
		} else {
			strncpy(iter->fmt, p + i, j + 1);
			iter->fmt[j+1] = '\0';
		}
		if (star)
			trace_seq_printf(&iter->seq, iter->fmt, len, str);
		else
			trace_seq_printf(&iter->seq, iter->fmt, str);

		p += i + j + 1;
	}
 print:
	if (*p)
		trace_seq_vprintf(&iter->seq, p, ap);
}
const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
{
	const char *p, *new_fmt;
	char *q;

	if (WARN_ON_ONCE(!fmt))
		return fmt;

	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
		return fmt;

	p = fmt;
	new_fmt = q = iter->fmt;
	while (*p) {
		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
			if (!trace_iter_expand_format(iter))
				return fmt;

			q += iter->fmt - new_fmt;
			new_fmt = iter->fmt;
		}

		*q++ = *p++;

		/* Replace %p with %px */
		if (p[-1] == '%') {
			if (p[0] == '%') {
				*q++ = *p++;
			} else if (p[0] == 'p' && !isalnum(p[1])) {
				*q++ = *p++;
				*q++ = 'x';
			}
		}
	}
	*q = '\0';

	return new_fmt;
}
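
/*
 * Worked example (illustrative): with TRACE_ITER_HASH_PTR cleared,
 * trace_event_format() rewrites the format so pointers print unhashed:
 *
 *	"ptr=%p flags=%pGg"  ->  "ptr=%px flags=%pGg"
 *
 * Only a bare %p (not followed by an alphanumeric extension) is replaced,
 * and "%%p" is left alone since the first '%' escapes the second.
 */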
#define STATIC_TEMP_BUF_SIZE	128
static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	/* __find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_entry *entry;

	/*
	 * If called from ftrace_dump(), then the iter->temp buffer
	 * will be the static_temp_buf and not created from kmalloc.
	 * If the entry size is greater than the buffer, we can
	 * not save it. Just return NULL in that case. This is only
	 * used to add markers when two consecutive events' time
	 * stamps have a large delta. See trace_print_lat_context()
	 */
	if (iter->temp == static_temp_buf &&
	    STATIC_TEMP_BUF_SIZE < ent_size)
		return NULL;

	/*
	 * The __find_next_entry() may call peek_next_entry(), which may
	 * call ring_buffer_peek() that may make the contents of iter->ent
	 * undefined. Need to copy iter->ent now.
	 */
	if (iter->ent && iter->ent != iter->temp) {
		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
			void *temp;
			temp = kmalloc(iter->ent_size, GFP_KERNEL);
			if (!temp)
				return NULL;
			kfree(iter->temp);
			iter->temp = temp;
			iter->temp_size = iter->ent_size;
		}
		memcpy(iter->temp, iter->ent, iter->ent_size);
		iter->ent = iter->temp;
	}
	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
	/* Put back the original ent_size */
	iter->ent_size = ent_size;

	return entry;
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while (ring_buffer_iter_peek(buf_iter, &ts)) {
		if (ts >= iter->array_buffer->time_start)
			break;
		entries++;
		ring_buffer_iter_advance(buf_iter);
	}

	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
}
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
static void
get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
		      unsigned long *entries, int cpu)
{
	unsigned long count;

	count = ring_buffer_entries_cpu(buf->buffer, cpu);
	/*
	 * If this buffer has skipped entries, then we hold all
	 * entries for the trace and we need to ignore the
	 * ones before the time stamp.
	 */
	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
		/* total is the same as the entries */
		*total = count;
	} else
		*total = count +
			ring_buffer_overrun_cpu(buf->buffer, cpu);
	*entries = count;
}

static void
get_total_entries(struct array_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long t, e;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		get_total_entries_cpu(buf, &t, &e, cpu);
		*total += t;
		*entries += e;
	}
}

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
{
	unsigned long total, entries;

	if (!tr)
		tr = &global_trace;

	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);

	return entries;
}

unsigned long trace_total_entries(struct trace_array *tr)
{
	unsigned long total, entries;

	if (!tr)
		tr = &global_trace;

	get_total_entries(&tr->array_buffer, &total, &entries);

	return entries;
}
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                    _------=> CPU#            \n"
		    "#                   / _-----=> irqs-off        \n"
		    "#                  | / _----=> need-resched    \n"
		    "#                  || / _---=> hardirq/softirq \n"
		    "#                  ||| / _--=> preempt-depth   \n"
		    "#                  |||| / _-=> migrate-disable \n"
		    "#                  ||||| /     delay           \n"
		    "#  cmd     pid     |||||| time  |   caller     \n"
		    "#     \\   /        ||||||  \\    |    /       \n");
}

static void print_event_info(struct array_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
				   unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;

	print_event_info(buf, m);

	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
}

static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
				       unsigned int flags)
{
	bool tgid = flags & TRACE_ITER_RECORD_TGID;
	const char *space = "            ";
	int prec = tgid ? 12 : 2;

	print_event_info(buf, m);

	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
}
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct array_buffer *buf = iter->array_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#elif defined(CONFIG_PREEMPT_RT)
		   "preempt_rt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		if (iter->lost_events == (unsigned long)-1)
			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
					 iter->cpu);
		else
			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
					 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->array_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->array_buffer, m,
						       trace_flags);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}
#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * ret is 0 if seq_file write succeeded.
		 *        -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
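
/*
 * Encoding example (illustrative): trace_create_cpu_file() stores cpu + 1
 * in i_cdev so that zero can mean "no specific CPU". Decoding above:
 *
 *	i_cdev == 0        ->  RING_BUFFER_ALL_CPUS
 *	i_cdev == cpu + 1  ->  cpu   (e.g. 3 -> CPU 2)
 */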
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * trace_find_next_entry() may need to save off iter->ent.
	 * It will place it into the iter->temp buffer. As most
	 * events are less than 128, allocate a buffer of that size.
	 * If one is greater, then trace_find_next_entry() will
	 * allocate a new buffer to adjust for the bigger iter->ent.
	 * It's not critical if it fails to get allocated here.
	 */
	iter->temp = kmalloc(128, GFP_KERNEL);
	if (iter->temp)
		iter->temp_size = 128;

	/*
	 * trace_event_printf() may need to modify given format
	 * string to replace %p with %px so that it shows real address
	 * instead of hash value. However, that is only for the event
	 * tracing, other tracer may not need. Defer the allocation
	 * until it is needed.
	 */
	iter->fmt = NULL;
	iter->fmt_size = 0;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->array_buffer = &tr->max_buffer;
	else
#endif
		iter->array_buffer = &tr->array_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/*
	 * If pause-on-trace is enabled, then stop the trace while
	 * dumping, unless this is the "snapshot" file
	 */
	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->array_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->array_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->temp);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
4790 int tracing_open_generic(struct inode *inode, struct file *filp)
4794 ret = tracing_check_open_get_tr(NULL);
4798 filp->private_data = inode->i_private;
4802 bool tracing_is_disabled(void)
4804 return tracing_disabled;
4808 * Open and update trace_array ref count.
4809 * Must have the current trace_array passed to it.
4811 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4813 struct trace_array *tr = inode->i_private;
4816 ret = tracing_check_open_get_tr(tr);
4820 filp->private_data = inode->i_private;
4825 static int tracing_release(struct inode *inode, struct file *file)
4827 struct trace_array *tr = inode->i_private;
4828 struct seq_file *m = file->private_data;
4829 struct trace_iterator *iter;
4832 if (!(file->f_mode & FMODE_READ)) {
4833 trace_array_put(tr);
4837 /* Writes do not use seq_file */
4839 mutex_lock(&trace_types_lock);
4841 for_each_tracing_cpu(cpu) {
4842 if (iter->buffer_iter[cpu])
4843 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4846 if (iter->trace && iter->trace->close)
4847 iter->trace->close(iter);
4849 if (!iter->snapshot && tr->stop_count)
4850 /* reenable tracing if it was previously enabled */
4851 tracing_start_tr(tr);
4853 __trace_array_put(tr);
4855 mutex_unlock(&trace_types_lock);
4857 mutex_destroy(&iter->mutex);
4858 free_cpumask_var(iter->started);
4862 kfree(iter->buffer_iter);
4863 seq_release_private(inode, file);
4868 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4870 struct trace_array *tr = inode->i_private;
4872 trace_array_put(tr);
4876 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4878 struct trace_array *tr = inode->i_private;
4880 trace_array_put(tr);
4882 return single_release(inode, file);
4885 static int tracing_open(struct inode *inode, struct file *file)
4887 struct trace_array *tr = inode->i_private;
4888 struct trace_iterator *iter;
4891 ret = tracing_check_open_get_tr(tr);
4895 /* If this file was open for write, then erase contents */
4896 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4897 int cpu = tracing_get_cpu(inode);
4898 struct array_buffer *trace_buf = &tr->array_buffer;
4900 #ifdef CONFIG_TRACER_MAX_TRACE
4901 if (tr->current_trace->print_max)
4902 trace_buf = &tr->max_buffer;
4905 if (cpu == RING_BUFFER_ALL_CPUS)
4906 tracing_reset_online_cpus(trace_buf);
4908 tracing_reset_cpu(trace_buf, cpu);
4911 if (file->f_mode & FMODE_READ) {
4912 iter = __tracing_open(inode, file, false);
4914 ret = PTR_ERR(iter);
4915 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4916 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4920 trace_array_put(tr);
4926 * Some tracers are not suitable for instance buffers.
4927 * A tracer is always available for the global array (toplevel)
4928 * or if it explicitly states that it is.
4931 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4933 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
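/*
 * A tracer that is safe for instance buffers opts in explicitly, e.g.
 * (illustrative sketch, not a real tracer):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name			= "my_tracer",
 *		.allow_instances	= true,
 *	};
 */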
4936 /* Find the next tracer that this trace array may use */
4937 static struct tracer *
4938 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4940 while (t && !trace_ok_for_array(t, tr))
4947 t_next(struct seq_file *m, void *v, loff_t *pos)
4949 struct trace_array *tr = m->private;
4950 struct tracer *t = v;
4955 t = get_tracer_for_array(tr, t->next);
4960 static void *t_start(struct seq_file *m, loff_t *pos)
4962 struct trace_array *tr = m->private;
4966 mutex_lock(&trace_types_lock);
4968 t = get_tracer_for_array(tr, trace_types);
4969 for (; t && l < *pos; t = t_next(m, t, &l))
4975 static void t_stop(struct seq_file *m, void *p)
4977 mutex_unlock(&trace_types_lock);
4980 static int t_show(struct seq_file *m, void *v)
4982 struct tracer *t = v;
4987 seq_puts(m, t->name);
4996 static const struct seq_operations show_traces_seq_ops = {
5003 static int show_traces_open(struct inode *inode, struct file *file)
5005 struct trace_array *tr = inode->i_private;
5009 ret = tracing_check_open_get_tr(tr);
5013 ret = seq_open(file, &show_traces_seq_ops);
5015 trace_array_put(tr);
5019 m = file->private_data;
5025 static int show_traces_release(struct inode *inode, struct file *file)
5027 struct trace_array *tr = inode->i_private;
5029 trace_array_put(tr);
5030 return seq_release(inode, file);
5034 tracing_write_stub(struct file *filp, const char __user *ubuf,
5035 size_t count, loff_t *ppos)
5040 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5044 if (file->f_mode & FMODE_READ)
5045 ret = seq_lseek(file, offset, whence);
5047 file->f_pos = ret = 0;
5052 static const struct file_operations tracing_fops = {
5053 .open = tracing_open,
5055 .write = tracing_write_stub,
5056 .llseek = tracing_lseek,
5057 .release = tracing_release,
5060 static const struct file_operations show_traces_fops = {
5061 .open = show_traces_open,
5063 .llseek = seq_lseek,
5064 .release = show_traces_release,
5068 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5069 size_t count, loff_t *ppos)
5071 struct trace_array *tr = file_inode(filp)->i_private;
5075 len = snprintf(NULL, 0, "%*pb\n",
5076 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5077 mask_str = kmalloc(len, GFP_KERNEL);
5081 len = snprintf(mask_str, len, "%*pb\n",
5082 cpumask_pr_args(tr->tracing_cpumask));
5087 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
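/*
 * The snprintf(NULL, 0, ...) above is the usual two-pass sizing idiom:
 * a NULL buffer of size 0 only computes the would-be length, so the
 * second snprintf() into mask_str cannot truncate. On a 4-CPU box the
 * file then reads back e.g. "f\n".
 */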
5095 int tracing_set_cpumask(struct trace_array *tr,
5096 cpumask_var_t tracing_cpumask_new)
5103 local_irq_disable();
5104 arch_spin_lock(&tr->max_lock);
5105 for_each_tracing_cpu(cpu) {
5107 * Increase/decrease the disabled counter if we are
5108 * about to flip a bit in the cpumask:
5110 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5111 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5112 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5113 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5115 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5116 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5117 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5118 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5121 arch_spin_unlock(&tr->max_lock);
5124 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
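/*
 * Userspace reaches this through the "tracing_cpumask" file, e.g.:
 *
 *	# echo 3 > tracing_cpumask
 *
 * to limit tracing to CPUs 0 and 1 (illustrative; the mask is parsed
 * by cpumask_parse_user() in tracing_cpumask_write() below).
 */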
5130 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5131 size_t count, loff_t *ppos)
5133 struct trace_array *tr = file_inode(filp)->i_private;
5134 cpumask_var_t tracing_cpumask_new;
5137 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5140 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5144 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5148 free_cpumask_var(tracing_cpumask_new);
5153 free_cpumask_var(tracing_cpumask_new);
5158 static const struct file_operations tracing_cpumask_fops = {
5159 .open = tracing_open_generic_tr,
5160 .read = tracing_cpumask_read,
5161 .write = tracing_cpumask_write,
5162 .release = tracing_release_generic_tr,
5163 .llseek = generic_file_llseek,
5166 static int tracing_trace_options_show(struct seq_file *m, void *v)
5168 struct tracer_opt *trace_opts;
5169 struct trace_array *tr = m->private;
5173 mutex_lock(&trace_types_lock);
5174 tracer_flags = tr->current_trace->flags->val;
5175 trace_opts = tr->current_trace->flags->opts;
5177 for (i = 0; trace_options[i]; i++) {
5178 if (tr->trace_flags & (1 << i))
5179 seq_printf(m, "%s\n", trace_options[i]);
5181 seq_printf(m, "no%s\n", trace_options[i]);
5184 for (i = 0; trace_opts[i].name; i++) {
5185 if (tracer_flags & trace_opts[i].bit)
5186 seq_printf(m, "%s\n", trace_opts[i].name);
5188 seq_printf(m, "no%s\n", trace_opts[i].name);
5190 mutex_unlock(&trace_types_lock);
5195 static int __set_tracer_option(struct trace_array *tr,
5196 struct tracer_flags *tracer_flags,
5197 struct tracer_opt *opts, int neg)
5199 struct tracer *trace = tracer_flags->trace;
5202 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5207 tracer_flags->val &= ~opts->bit;
5209 tracer_flags->val |= opts->bit;
5213 /* Try to assign a tracer specific option */
5214 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5216 struct tracer *trace = tr->current_trace;
5217 struct tracer_flags *tracer_flags = trace->flags;
5218 struct tracer_opt *opts = NULL;
5221 for (i = 0; tracer_flags->opts[i].name; i++) {
5222 opts = &tracer_flags->opts[i];
5224 if (strcmp(cmp, opts->name) == 0)
5225 return __set_tracer_option(tr, trace->flags, opts, neg);
5231 /* Some tracers require overwrite to stay enabled */
5232 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5234 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5240 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5244 if ((mask == TRACE_ITER_RECORD_TGID) ||
5245 (mask == TRACE_ITER_RECORD_CMD))
5246 lockdep_assert_held(&event_mutex);
5248 /* do nothing if flag is already set */
5249 if (!!(tr->trace_flags & mask) == !!enabled)
5252 /* Give the tracer a chance to approve the change */
5253 if (tr->current_trace->flag_changed)
5254 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5258 tr->trace_flags |= mask;
5260 tr->trace_flags &= ~mask;
5262 if (mask == TRACE_ITER_RECORD_CMD)
5263 trace_event_enable_cmd_record(enabled);
5265 if (mask == TRACE_ITER_RECORD_TGID) {
5267 tgid_map_max = pid_max;
5268 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5272 * Pairs with smp_load_acquire() in
5273 * trace_find_tgid_ptr() to ensure that if it observes
5274 * the tgid_map we just allocated then it also observes
5275 * the corresponding tgid_map_max value.
5277 smp_store_release(&tgid_map, map);
5280 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5284 trace_event_enable_tgid_record(enabled);
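/*
 * Sketch of the matching reader (assumed shape of trace_find_tgid_ptr());
 * its smp_load_acquire() pairs with the smp_store_release() above:
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */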
5287 if (mask == TRACE_ITER_EVENT_FORK)
5288 trace_event_follow_fork(tr, enabled);
5290 if (mask == TRACE_ITER_FUNC_FORK)
5291 ftrace_pid_follow_fork(tr, enabled);
5293 if (mask == TRACE_ITER_OVERWRITE) {
5294 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5295 #ifdef CONFIG_TRACER_MAX_TRACE
5296 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5300 if (mask == TRACE_ITER_PRINTK) {
5301 trace_printk_start_stop_comm(enabled);
5302 trace_printk_control(enabled);
5308 int trace_set_options(struct trace_array *tr, char *option)
5313 size_t orig_len = strlen(option);
5316 cmp = strstrip(option);
5318 len = str_has_prefix(cmp, "no");
5324 mutex_lock(&event_mutex);
5325 mutex_lock(&trace_types_lock);
5327 ret = match_string(trace_options, -1, cmp);
5328 /* If no option could be set, test the specific tracer options */
5330 ret = set_tracer_option(tr, cmp, neg);
5332 ret = set_tracer_flag(tr, 1 << ret, !neg);
5334 mutex_unlock(&trace_types_lock);
5335 mutex_unlock(&event_mutex);
5338 * If the first trailing whitespace is replaced with '\0' by strstrip,
5339 * turn it back into a space.
5341 if (orig_len > strlen(option))
5342 option[strlen(option)] = ' ';
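/*
 * Example: trace_set_options(tr, "notrace_printk") strips the "no"
 * prefix, matches the remaining name in trace_options[], and clears
 * TRACE_ITER_PRINTK; a name matching neither trace_options[] nor the
 * current tracer's private flags fails (illustrative walk-through,
 * assuming "trace_printk" is the TRACE_ITER_PRINTK option name).
 */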
5347 static void __init apply_trace_boot_options(void)
5349 char *buf = trace_boot_options_buf;
5353 option = strsep(&buf, ",");
5359 trace_set_options(&global_trace, option);
5361 /* Put back the comma to allow this to be called again */
5368 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5369 size_t cnt, loff_t *ppos)
5371 struct seq_file *m = filp->private_data;
5372 struct trace_array *tr = m->private;
5376 if (cnt >= sizeof(buf))
5379 if (copy_from_user(buf, ubuf, cnt))
5384 ret = trace_set_options(tr, buf);
5393 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5395 struct trace_array *tr = inode->i_private;
5398 ret = tracing_check_open_get_tr(tr);
5402 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5404 trace_array_put(tr);
5409 static const struct file_operations tracing_iter_fops = {
5410 .open = tracing_trace_options_open,
5412 .llseek = seq_lseek,
5413 .release = tracing_single_release_tr,
5414 .write = tracing_trace_options_write,
5417 static const char readme_msg[] =
5418 "tracing mini-HOWTO:\n\n"
5419 "# echo 0 > tracing_on : quick way to disable tracing\n"
5420 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5421 " Important files:\n"
5422 " trace\t\t\t- The static contents of the buffer\n"
5423 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5424 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5425 " current_tracer\t- function and latency tracers\n"
5426 " available_tracers\t- list of configured tracers for current_tracer\n"
5427 " error_log\t- error log for failed commands (that support it)\n"
5428 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5429 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5430 " trace_clock\t\t-change the clock used to order events\n"
5431 " local: Per cpu clock but may not be synced across CPUs\n"
5432 " global: Synced across CPUs but slows tracing down.\n"
5433 " counter: Not a clock, but just an increment\n"
5434 " uptime: Jiffy counter from time of boot\n"
5435 " perf: Same clock that perf events use\n"
5436 #ifdef CONFIG_X86_64
5437 " x86-tsc: TSC cycle counter\n"
5439 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5440 " delta: Delta difference against a buffer-wide timestamp\n"
5441 " absolute: Absolute (standalone) timestamp\n"
5442 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5443 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5444 " tracing_cpumask\t- Limit which CPUs to trace\n"
5445 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5446 "\t\t\t Remove sub-buffer with rmdir\n"
5447 " trace_options\t\t- Set format or modify how tracing happens\n"
5448 "\t\t\t Disable an option by prefixing 'no' to the\n"
5449 "\t\t\t option name\n"
5450 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5451 #ifdef CONFIG_DYNAMIC_FTRACE
5452 "\n available_filter_functions - list of functions that can be filtered on\n"
5453 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5454 "\t\t\t functions\n"
5455 "\t accepts: func_full_name or glob-matching-pattern\n"
5456 "\t modules: Can select a group via module\n"
5457 "\t Format: :mod:<module-name>\n"
5458 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5459 "\t triggers: a command to perform when function is hit\n"
5460 "\t Format: <function>:<trigger>[:count]\n"
5461 "\t trigger: traceon, traceoff\n"
5462 "\t\t enable_event:<system>:<event>\n"
5463 "\t\t disable_event:<system>:<event>\n"
5464 #ifdef CONFIG_STACKTRACE
5467 #ifdef CONFIG_TRACER_SNAPSHOT
5472 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5473 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5474 "\t The first one will disable tracing every time do_fault is hit\n"
5475 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5476 "\t The first time do trap is hit and it disables tracing, the\n"
5477 "\t counter will decrement to 2. If tracing is already disabled,\n"
5478 "\t the counter will not decrement. It only decrements when the\n"
5479 "\t trigger did work\n"
5480 "\t To remove trigger without count:\n"
5481 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5482 "\t To remove trigger with a count:\n"
5483 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5484 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5485 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5486 "\t modules: Can select a group via module command :mod:\n"
5487 "\t Does not accept triggers\n"
5488 #endif /* CONFIG_DYNAMIC_FTRACE */
5489 #ifdef CONFIG_FUNCTION_TRACER
5490 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5492 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5495 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5496 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5497 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5498 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5500 #ifdef CONFIG_TRACER_SNAPSHOT
5501 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5502 "\t\t\t snapshot buffer. Read the contents for more\n"
5503 "\t\t\t information\n"
5505 #ifdef CONFIG_STACK_TRACER
5506 " stack_trace\t\t- Shows the max stack trace when active\n"
5507 " stack_max_size\t- Shows current max stack size that was traced\n"
5508 "\t\t\t Write into this file to reset the max size (trigger a\n"
5509 "\t\t\t new trace)\n"
5510 #ifdef CONFIG_DYNAMIC_FTRACE
5511 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5514 #endif /* CONFIG_STACK_TRACER */
5515 #ifdef CONFIG_DYNAMIC_EVENTS
5516 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5517 "\t\t\t Write into this file to define/undefine new trace events.\n"
5519 #ifdef CONFIG_KPROBE_EVENTS
5520 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5521 "\t\t\t Write into this file to define/undefine new trace events.\n"
5523 #ifdef CONFIG_UPROBE_EVENTS
5524 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5525 "\t\t\t Write into this file to define/undefine new trace events.\n"
5527 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5528 "\t accepts: event-definitions (one definition per line)\n"
5529 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5530 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5531 #ifdef CONFIG_HIST_TRIGGERS
5532 "\t s:[synthetic/]<event> <field> [<field>]\n"
5534 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5535 "\t -:[<group>/]<event>\n"
5536 #ifdef CONFIG_KPROBE_EVENTS
5537 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5538 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5540 #ifdef CONFIG_UPROBE_EVENTS
5541 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5543 "\t args: <name>=fetcharg[:type]\n"
5544 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5545 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5546 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5548 "\t $stack<index>, $stack, $retval, $comm,\n"
5550 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5551 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5552 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5553 "\t <type>\\[<array-size>\\]\n"
5554 #ifdef CONFIG_HIST_TRIGGERS
5555 "\t field: <stype> <name>;\n"
5556 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5557 "\t [unsigned] char/int/long\n"
5559 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5560 "\t of the <attached-group>/<attached-event>.\n"
5562 " events/\t\t- Directory containing all trace event subsystems:\n"
5563 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5564 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5565 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5567 " filter\t\t- If set, only events passing filter are traced\n"
5568 " events/<system>/<event>/\t- Directory containing control files for\n"
5570 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5571 " filter\t\t- If set, only events passing filter are traced\n"
5572 " trigger\t\t- If set, a command to perform when event is hit\n"
5573 "\t Format: <trigger>[:count][if <filter>]\n"
5574 "\t trigger: traceon, traceoff\n"
5575 "\t enable_event:<system>:<event>\n"
5576 "\t disable_event:<system>:<event>\n"
5577 #ifdef CONFIG_HIST_TRIGGERS
5578 "\t enable_hist:<system>:<event>\n"
5579 "\t disable_hist:<system>:<event>\n"
5581 #ifdef CONFIG_STACKTRACE
5584 #ifdef CONFIG_TRACER_SNAPSHOT
5587 #ifdef CONFIG_HIST_TRIGGERS
5588 "\t\t hist (see below)\n"
5590 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5591 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5592 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5593 "\t events/block/block_unplug/trigger\n"
5594 "\t The first disables tracing every time block_unplug is hit.\n"
5595 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5596 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5597 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5598 "\t Like function triggers, the counter is only decremented if it\n"
5599 "\t enabled or disabled tracing.\n"
5600 "\t To remove a trigger without a count:\n"
5601 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5602 "\t To remove a trigger with a count:\n"
5603 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5604 "\t Filters can be ignored when removing a trigger.\n"
5605 #ifdef CONFIG_HIST_TRIGGERS
5606 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5607 "\t Format: hist:keys=<field1[,field2,...]>\n"
5608 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5609 "\t [:values=<field1[,field2,...]>]\n"
5610 "\t [:sort=<field1[,field2,...]>]\n"
5611 "\t [:size=#entries]\n"
5612 "\t [:pause][:continue][:clear]\n"
5613 "\t [:name=histname1]\n"
5614 "\t [:<handler>.<action>]\n"
5615 "\t [if <filter>]\n\n"
5616 "\t Note, special fields can be used as well:\n"
5617 "\t common_timestamp - to record current timestamp\n"
5618 "\t common_cpu - to record the CPU the event happened on\n"
5620 "\t A hist trigger variable can be:\n"
5621 "\t - a reference to a field e.g. x=current_timestamp,\n"
5622 "\t - a reference to another variable e.g. y=$x,\n"
5623 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5624 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5626 "\t hist trigger aritmethic expressions support addition(+), subtraction(-),\n"
5627 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5628 "\t variable reference, field or numeric literal.\n"
5630 "\t When a matching event is hit, an entry is added to a hash\n"
5631 "\t table using the key(s) and value(s) named, and the value of a\n"
5632 "\t sum called 'hitcount' is incremented. Keys and values\n"
5633 "\t correspond to fields in the event's format description. Keys\n"
5634 "\t can be any field, or the special string 'stacktrace'.\n"
5635 "\t Compound keys consisting of up to two fields can be specified\n"
5636 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5637 "\t fields. Sort keys consisting of up to two fields can be\n"
5638 "\t specified using the 'sort' keyword. The sort direction can\n"
5639 "\t be modified by appending '.descending' or '.ascending' to a\n"
5640 "\t sort field. The 'size' parameter can be used to specify more\n"
5641 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5642 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5643 "\t its histogram data will be shared with other triggers of the\n"
5644 "\t same name, and trigger hits will update this common data.\n\n"
5645 "\t Reading the 'hist' file for the event will dump the hash\n"
5646 "\t table in its entirety to stdout. If there are multiple hist\n"
5647 "\t triggers attached to an event, there will be a table for each\n"
5648 "\t trigger in the output. The table displayed for a named\n"
5649 "\t trigger will be the same as any other instance having the\n"
5650 "\t same name. The default format used to display a given field\n"
5651 "\t can be modified by appending any of the following modifiers\n"
5652 "\t to the field name, as applicable:\n\n"
5653 "\t .hex display a number as a hex value\n"
5654 "\t .sym display an address as a symbol\n"
5655 "\t .sym-offset display an address as a symbol and offset\n"
5656 "\t .execname display a common_pid as a program name\n"
5657 "\t .syscall display a syscall id as a syscall name\n"
5658 "\t .log2 display log2 value rather than raw number\n"
5659 "\t .buckets=size display values in groups of size rather than raw number\n"
5660 "\t .usecs display a common_timestamp in microseconds\n\n"
5661 "\t The 'pause' parameter can be used to pause an existing hist\n"
5662 "\t trigger or to start a hist trigger but not log any events\n"
5663 "\t until told to do so. 'continue' can be used to start or\n"
5664 "\t restart a paused hist trigger.\n\n"
5665 "\t The 'clear' parameter will clear the contents of a running\n"
5666 "\t hist trigger and leave its current paused/active state\n"
5668 "\t The enable_hist and disable_hist triggers can be used to\n"
5669 "\t have one event conditionally start and stop another event's\n"
5670 "\t already-attached hist trigger. The syntax is analogous to\n"
5671 "\t the enable_event and disable_event triggers.\n\n"
5672 "\t Hist trigger handlers and actions are executed whenever a\n"
5673 "\t a histogram entry is added or updated. They take the form:\n\n"
5674 "\t <handler>.<action>\n\n"
5675 "\t The available handlers are:\n\n"
5676 "\t onmatch(matching.event) - invoke on addition or update\n"
5677 "\t onmax(var) - invoke if var exceeds current max\n"
5678 "\t onchange(var) - invoke action if var changes\n\n"
5679 "\t The available actions are:\n\n"
5680 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5681 "\t save(field,...) - save current event fields\n"
5682 #ifdef CONFIG_TRACER_SNAPSHOT
5683 "\t snapshot() - snapshot the trace buffer\n\n"
5685 #ifdef CONFIG_SYNTH_EVENTS
5686 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5687 "\t Write into this file to define/undefine new synthetic events.\n"
5688 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5694 tracing_readme_read(struct file *filp, char __user *ubuf,
5695 size_t cnt, loff_t *ppos)
5697 return simple_read_from_buffer(ubuf, cnt, ppos,
5698 readme_msg, strlen(readme_msg));
5701 static const struct file_operations tracing_readme_fops = {
5702 .open = tracing_open_generic,
5703 .read = tracing_readme_read,
5704 .llseek = generic_file_llseek,
5707 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5711 return trace_find_tgid_ptr(pid);
5714 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5718 return trace_find_tgid_ptr(pid);
5721 static void saved_tgids_stop(struct seq_file *m, void *v)
5725 static int saved_tgids_show(struct seq_file *m, void *v)
5727 int *entry = (int *)v;
5728 int pid = entry - tgid_map;
5734 seq_printf(m, "%d %d\n", pid, tgid);
5738 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5739 .start = saved_tgids_start,
5740 .stop = saved_tgids_stop,
5741 .next = saved_tgids_next,
5742 .show = saved_tgids_show,
5745 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5749 ret = tracing_check_open_get_tr(NULL);
5753 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5757 static const struct file_operations tracing_saved_tgids_fops = {
5758 .open = tracing_saved_tgids_open,
5760 .llseek = seq_lseek,
5761 .release = seq_release,
5764 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5766 unsigned int *ptr = v;
5768 if (*pos || m->count)
5773 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5775 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5784 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5790 arch_spin_lock(&trace_cmdline_lock);
5792 v = &savedcmd->map_cmdline_to_pid[0];
5794 v = saved_cmdlines_next(m, v, &l);
5802 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5804 arch_spin_unlock(&trace_cmdline_lock);
5808 static int saved_cmdlines_show(struct seq_file *m, void *v)
5810 char buf[TASK_COMM_LEN];
5811 unsigned int *pid = v;
5813 __trace_find_cmdline(*pid, buf);
5814 seq_printf(m, "%d %s\n", *pid, buf);
5818 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5819 .start = saved_cmdlines_start,
5820 .next = saved_cmdlines_next,
5821 .stop = saved_cmdlines_stop,
5822 .show = saved_cmdlines_show,
5825 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5829 ret = tracing_check_open_get_tr(NULL);
5833 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5836 static const struct file_operations tracing_saved_cmdlines_fops = {
5837 .open = tracing_saved_cmdlines_open,
5839 .llseek = seq_lseek,
5840 .release = seq_release,
5844 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5845 size_t cnt, loff_t *ppos)
5850 arch_spin_lock(&trace_cmdline_lock);
5851 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5852 arch_spin_unlock(&trace_cmdline_lock);
5854 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5857 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5859 kfree(s->saved_cmdlines);
5860 kfree(s->map_cmdline_to_pid);
5864 static int tracing_resize_saved_cmdlines(unsigned int val)
5866 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5868 s = kmalloc(sizeof(*s), GFP_KERNEL);
5872 if (allocate_cmdlines_buffer(val, s) < 0) {
5877 arch_spin_lock(&trace_cmdline_lock);
5878 savedcmd_temp = savedcmd;
5880 arch_spin_unlock(&trace_cmdline_lock);
5881 free_saved_cmdlines_buffer(savedcmd_temp);
5887 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5888 size_t cnt, loff_t *ppos)
5893 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5897 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5898 if (!val || val > PID_MAX_DEFAULT)
5901 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5910 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5911 .open = tracing_open_generic,
5912 .read = tracing_saved_cmdlines_size_read,
5913 .write = tracing_saved_cmdlines_size_write,
5916 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5917 static union trace_eval_map_item *
5918 update_eval_map(union trace_eval_map_item *ptr)
5920 if (!ptr->map.eval_string) {
5921 if (ptr->tail.next) {
5922 ptr = ptr->tail.next;
5923 /* Set ptr to the next real item (skip head) */
5931 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5933 union trace_eval_map_item *ptr = v;
5936 * Paranoid! If ptr points to end, we don't want to increment past it.
5937 * This really should never happen.
5940 ptr = update_eval_map(ptr);
5941 if (WARN_ON_ONCE(!ptr))
5945 ptr = update_eval_map(ptr);
5950 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5952 union trace_eval_map_item *v;
5955 mutex_lock(&trace_eval_mutex);
5957 v = trace_eval_maps;
5961 while (v && l < *pos) {
5962 v = eval_map_next(m, v, &l);
5968 static void eval_map_stop(struct seq_file *m, void *v)
5970 mutex_unlock(&trace_eval_mutex);
5973 static int eval_map_show(struct seq_file *m, void *v)
5975 union trace_eval_map_item *ptr = v;
5977 seq_printf(m, "%s %ld (%s)\n",
5978 ptr->map.eval_string, ptr->map.eval_value,
5984 static const struct seq_operations tracing_eval_map_seq_ops = {
5985 .start = eval_map_start,
5986 .next = eval_map_next,
5987 .stop = eval_map_stop,
5988 .show = eval_map_show,
5991 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5995 ret = tracing_check_open_get_tr(NULL);
5999 return seq_open(filp, &tracing_eval_map_seq_ops);
6002 static const struct file_operations tracing_eval_map_fops = {
6003 .open = tracing_eval_map_open,
6005 .llseek = seq_lseek,
6006 .release = seq_release,
6009 static inline union trace_eval_map_item *
6010 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6012 /* Return tail of array given the head */
6013 return ptr + ptr->head.length + 1;
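/*
 * Layout assumed by this helper: a head item, then "length" map
 * entries, then the tail item holding the pointer to the next array:
 *
 *	[ head | map 0 | map 1 | ... | map len-1 | tail ]
 *
 * hence the tail lives at ptr + length + 1.
 */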
6017 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6020 struct trace_eval_map **stop;
6021 struct trace_eval_map **map;
6022 union trace_eval_map_item *map_array;
6023 union trace_eval_map_item *ptr;
6028 * The trace_eval_maps contains the map plus a head and tail item,
6029 * where the head holds the module and length of array, and the
6030 * tail holds a pointer to the next list.
6032 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6034 pr_warn("Unable to allocate trace eval mapping\n");
6038 mutex_lock(&trace_eval_mutex);
6040 if (!trace_eval_maps)
6041 trace_eval_maps = map_array;
6043 ptr = trace_eval_maps;
6045 ptr = trace_eval_jmp_to_tail(ptr);
6046 if (!ptr->tail.next)
6048 ptr = ptr->tail.next;
6051 ptr->tail.next = map_array;
6053 map_array->head.mod = mod;
6054 map_array->head.length = len;
6057 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6058 map_array->map = **map;
6061 memset(map_array, 0, sizeof(*map_array));
6063 mutex_unlock(&trace_eval_mutex);
6066 static void trace_create_eval_file(struct dentry *d_tracer)
6068 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6069 NULL, &tracing_eval_map_fops);
6072 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6073 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6074 static inline void trace_insert_eval_map_file(struct module *mod,
6075 struct trace_eval_map **start, int len) { }
6076 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6078 static void trace_insert_eval_map(struct module *mod,
6079 struct trace_eval_map **start, int len)
6081 struct trace_eval_map **map;
6088 trace_event_eval_update(map, len);
6090 trace_insert_eval_map_file(mod, start, len);
6094 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6095 size_t cnt, loff_t *ppos)
6097 struct trace_array *tr = filp->private_data;
6098 char buf[MAX_TRACER_SIZE+2];
6101 mutex_lock(&trace_types_lock);
6102 r = sprintf(buf, "%s\n", tr->current_trace->name);
6103 mutex_unlock(&trace_types_lock);
6105 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6108 int tracer_init(struct tracer *t, struct trace_array *tr)
6110 tracing_reset_online_cpus(&tr->array_buffer);
6114 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6118 for_each_tracing_cpu(cpu)
6119 per_cpu_ptr(buf->data, cpu)->entries = val;
6122 #ifdef CONFIG_TRACER_MAX_TRACE
6123 /* resize @tr's buffer to the size of @size_tr's entries */
6124 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6125 struct array_buffer *size_buf, int cpu_id)
6129 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6130 for_each_tracing_cpu(cpu) {
6131 ret = ring_buffer_resize(trace_buf->buffer,
6132 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6135 per_cpu_ptr(trace_buf->data, cpu)->entries =
6136 per_cpu_ptr(size_buf->data, cpu)->entries;
6139 ret = ring_buffer_resize(trace_buf->buffer,
6140 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6142 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6143 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6148 #endif /* CONFIG_TRACER_MAX_TRACE */
6150 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6151 unsigned long size, int cpu)
6156 * If the kernel or user changes the size of the ring buffer,
6157 * we use the size that was given, and we can forget about
6158 * expanding it later.
6160 ring_buffer_expanded = true;
6162 /* May be called before buffers are initialized */
6163 if (!tr->array_buffer.buffer)
6166 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6170 #ifdef CONFIG_TRACER_MAX_TRACE
6171 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6172 !tr->current_trace->use_max_tr)
6175 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6177 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6178 &tr->array_buffer, cpu);
6181 * AARGH! We are left with different
6182 * size max buffer!!!!
6183 * The max buffer is our "snapshot" buffer.
6184 * When a tracer needs a snapshot (one of the
6185 * latency tracers), it swaps the max buffer
6186 * with the saved snapshot. We succeeded in
6187 * updating the size of the main buffer, but failed to
6188 * update the size of the max buffer. But when we tried
6189 * to reset the main buffer to the original size, we
6190 * failed there too. This is very unlikely to
6191 * happen, but if it does, warn and kill all
6195 tracing_disabled = 1;
6200 if (cpu == RING_BUFFER_ALL_CPUS)
6201 set_buffer_entries(&tr->max_buffer, size);
6203 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6206 #endif /* CONFIG_TRACER_MAX_TRACE */
6208 if (cpu == RING_BUFFER_ALL_CPUS)
6209 set_buffer_entries(&tr->array_buffer, size);
6211 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6216 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6217 unsigned long size, int cpu_id)
6221 mutex_lock(&trace_types_lock);
6223 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6224 /* make sure, this cpu is enabled in the mask */
6225 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6231 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6236 mutex_unlock(&trace_types_lock);
6243 * tracing_update_buffers - used by tracing facility to expand ring buffers
6245 * To save memory on systems where tracing is configured in but
6246 * never used, the ring buffers are set to a minimum size. Once
6247 * a user starts to use the tracing facility, they need to grow
6248 * to their default size.
6250 * This function is to be called when a tracer is about to be used.
6252 int tracing_update_buffers(void)
6256 mutex_lock(&trace_types_lock);
6257 if (!ring_buffer_expanded)
6258 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6259 RING_BUFFER_ALL_CPUS);
6260 mutex_unlock(&trace_types_lock);
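/*
 * Callers are expected to do something like (illustrative):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * before enabling a tracer or event, so that only the first real user
 * pays the allocation cost.
 */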
6265 struct trace_option_dentry;
6268 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6271 * Used to clear out the tracer before deletion of an instance.
6272 * Must have trace_types_lock held.
6274 static void tracing_set_nop(struct trace_array *tr)
6276 if (tr->current_trace == &nop_trace)
6279 tr->current_trace->enabled--;
6281 if (tr->current_trace->reset)
6282 tr->current_trace->reset(tr);
6284 tr->current_trace = &nop_trace;
6287 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6289 /* Only enable if the directory has been created already. */
6293 create_trace_option_files(tr, t);
6296 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6299 #ifdef CONFIG_TRACER_MAX_TRACE
6304 mutex_lock(&trace_types_lock);
6306 if (!ring_buffer_expanded) {
6307 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6308 RING_BUFFER_ALL_CPUS);
6314 for (t = trace_types; t; t = t->next) {
6315 if (strcmp(t->name, buf) == 0)
6322 if (t == tr->current_trace)
6325 #ifdef CONFIG_TRACER_SNAPSHOT
6326 if (t->use_max_tr) {
6327 arch_spin_lock(&tr->max_lock);
6328 if (tr->cond_snapshot)
6330 arch_spin_unlock(&tr->max_lock);
6335 /* Some tracers won't work on kernel command line */
6336 if (system_state < SYSTEM_RUNNING && t->noboot) {
6337 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6342 /* Some tracers are only allowed for the top level buffer */
6343 if (!trace_ok_for_array(t, tr)) {
6348 /* If trace pipe files are being read, we can't change the tracer */
6349 if (tr->trace_ref) {
6354 trace_branch_disable();
6356 tr->current_trace->enabled--;
6358 if (tr->current_trace->reset)
6359 tr->current_trace->reset(tr);
6361 /* Current trace needs to be nop_trace before synchronize_rcu */
6362 tr->current_trace = &nop_trace;
6364 #ifdef CONFIG_TRACER_MAX_TRACE
6365 had_max_tr = tr->allocated_snapshot;
6367 if (had_max_tr && !t->use_max_tr) {
6369 * We need to make sure that the update_max_tr sees that
6370 * current_trace changed to nop_trace to keep it from
6371 * swapping the buffers after we resize it.
6372 * The update_max_tr() is called with interrupts disabled,
6373 * so a synchronize_rcu() is sufficient.
6380 #ifdef CONFIG_TRACER_MAX_TRACE
6381 if (t->use_max_tr && !had_max_tr) {
6382 ret = tracing_alloc_snapshot_instance(tr);
6389 ret = tracer_init(t, tr);
6394 tr->current_trace = t;
6395 tr->current_trace->enabled++;
6396 trace_branch_enable(tr);
6398 mutex_unlock(&trace_types_lock);
6404 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6405 size_t cnt, loff_t *ppos)
6407 struct trace_array *tr = filp->private_data;
6408 char buf[MAX_TRACER_SIZE+1];
6415 if (cnt > MAX_TRACER_SIZE)
6416 cnt = MAX_TRACER_SIZE;
6418 if (copy_from_user(buf, ubuf, cnt))
6423 /* strip ending whitespace. */
6424 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6427 err = tracing_set_tracer(tr, buf);
6437 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6438 size_t cnt, loff_t *ppos)
6443 r = snprintf(buf, sizeof(buf), "%ld\n",
6444 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6445 if (r > sizeof(buf))
6447 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
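/*
 * Note the (unsigned long)-1 sentinel above: it is reported verbatim
 * as "-1" rather than run through nsecs_to_usecs(), so an unset
 * latency reads back as "-1\n" instead of a huge number.
 */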
6451 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6452 size_t cnt, loff_t *ppos)
6457 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6467 tracing_thresh_read(struct file *filp, char __user *ubuf,
6468 size_t cnt, loff_t *ppos)
6470 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6474 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6475 size_t cnt, loff_t *ppos)
6477 struct trace_array *tr = filp->private_data;
6480 mutex_lock(&trace_types_lock);
6481 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6485 if (tr->current_trace->update_thresh) {
6486 ret = tr->current_trace->update_thresh(tr);
6493 mutex_unlock(&trace_types_lock);
6498 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6501 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6502 size_t cnt, loff_t *ppos)
6504 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6508 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6509 size_t cnt, loff_t *ppos)
6511 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6516 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6518 struct trace_array *tr = inode->i_private;
6519 struct trace_iterator *iter;
6522 ret = tracing_check_open_get_tr(tr);
6526 mutex_lock(&trace_types_lock);
6528 /* create a buffer to store the information to pass to userspace */
6529 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6532 __trace_array_put(tr);
6536 trace_seq_init(&iter->seq);
6537 iter->trace = tr->current_trace;
6539 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6544 /* trace pipe does not show start of buffer */
6545 cpumask_setall(iter->started);
6547 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6548 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6550 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6551 if (trace_clocks[tr->clock_id].in_ns)
6552 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6555 iter->array_buffer = &tr->array_buffer;
6556 iter->cpu_file = tracing_get_cpu(inode);
6557 mutex_init(&iter->mutex);
6558 filp->private_data = iter;
6560 if (iter->trace->pipe_open)
6561 iter->trace->pipe_open(iter);
6563 nonseekable_open(inode, filp);
6567 mutex_unlock(&trace_types_lock);
6572 __trace_array_put(tr);
6573 mutex_unlock(&trace_types_lock);
6577 static int tracing_release_pipe(struct inode *inode, struct file *file)
6579 struct trace_iterator *iter = file->private_data;
6580 struct trace_array *tr = inode->i_private;
6582 mutex_lock(&trace_types_lock);
6586 if (iter->trace->pipe_close)
6587 iter->trace->pipe_close(iter);
6589 mutex_unlock(&trace_types_lock);
6591 free_cpumask_var(iter->started);
6592 mutex_destroy(&iter->mutex);
6595 trace_array_put(tr);
6601 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6603 struct trace_array *tr = iter->tr;
6605 /* Iterators are static, they should be filled or empty */
6606 if (trace_buffer_iter(iter, iter->cpu_file))
6607 return EPOLLIN | EPOLLRDNORM;
6609 if (tr->trace_flags & TRACE_ITER_BLOCK)
6611 * Always select as readable when in blocking mode
6613 return EPOLLIN | EPOLLRDNORM;
6615 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6620 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6622 struct trace_iterator *iter = filp->private_data;
6624 return trace_poll(iter, filp, poll_table);
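/*
 * Userspace can therefore block on trace_pipe with poll(2), e.g.
 * (illustrative userspace snippet, error handling omitted):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	read(fd, buf, sizeof(buf));
 */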
6627 /* Must be called with iter->mutex held. */
6628 static int tracing_wait_pipe(struct file *filp)
6630 struct trace_iterator *iter = filp->private_data;
6633 while (trace_empty(iter)) {
6635 if ((filp->f_flags & O_NONBLOCK)) {
6640 * We block until we read something or tracing is disabled.
6641 * We still block if tracing is disabled, but we have never
6642 * read anything. This allows a user to cat this file, and
6643 * then enable tracing. But after we have read something,
6644 * we give an EOF when tracing is again disabled.
6646 * iter->pos will be 0 if we haven't read anything.
6648 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6651 mutex_unlock(&iter->mutex);
6653 ret = wait_on_pipe(iter, 0);
6655 mutex_lock(&iter->mutex);
6668 tracing_read_pipe(struct file *filp, char __user *ubuf,
6669 size_t cnt, loff_t *ppos)
6671 struct trace_iterator *iter = filp->private_data;
6675 * Avoid more than one consumer on a single file descriptor
6676 * This is just a matter of trace coherency: the ring buffer itself is protected.
6679 mutex_lock(&iter->mutex);
6681 /* return any leftover data */
6682 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6686 trace_seq_init(&iter->seq);
6688 if (iter->trace->read) {
6689 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6695 sret = tracing_wait_pipe(filp);
6699 /* stop when tracing is finished */
6700 if (trace_empty(iter)) {
6705 if (cnt >= PAGE_SIZE)
6706 cnt = PAGE_SIZE - 1;
6708 /* reset all but tr, trace, and overruns */
6709 memset(&iter->seq, 0,
6710 sizeof(struct trace_iterator) -
6711 offsetof(struct trace_iterator, seq));
6712 cpumask_clear(iter->started);
6713 trace_seq_init(&iter->seq);
6716 trace_event_read_lock();
6717 trace_access_lock(iter->cpu_file);
6718 while (trace_find_next_entry_inc(iter) != NULL) {
6719 enum print_line_t ret;
6720 int save_len = iter->seq.seq.len;
6722 ret = print_trace_line(iter);
6723 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6724 /* don't print partial lines */
6725 iter->seq.seq.len = save_len;
6728 if (ret != TRACE_TYPE_NO_CONSUME)
6729 trace_consume(iter);
6731 if (trace_seq_used(&iter->seq) >= cnt)
6735 * Setting the full flag means we reached the trace_seq buffer
6736 * size and we should leave by partial output condition above.
6737 * One of the trace_seq_* functions is not used properly.
6739 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6742 trace_access_unlock(iter->cpu_file);
6743 trace_event_read_unlock();
6745 /* Now copy what we have to the user */
6746 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6747 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6748 trace_seq_init(&iter->seq);
6751 * If there was nothing to send to user, in spite of consuming trace
6752 * entries, go back to wait for more entries.
6758 mutex_unlock(&iter->mutex);
6763 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6766 __free_page(spd->pages[idx]);
6770 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6776 /* Seq buffer is page-sized, exactly what we need. */
6778 save_len = iter->seq.seq.len;
6779 ret = print_trace_line(iter);
6781 if (trace_seq_has_overflowed(&iter->seq)) {
6782 iter->seq.seq.len = save_len;
6787 * This should not be hit, because it should only
6788 * be set if the iter->seq overflowed. But check it
6789 * anyway to be safe.
6791 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6792 iter->seq.seq.len = save_len;
6796 count = trace_seq_used(&iter->seq) - save_len;
6799 iter->seq.seq.len = save_len;
6803 if (ret != TRACE_TYPE_NO_CONSUME)
6804 trace_consume(iter);
6806 if (!trace_find_next_entry_inc(iter)) {
6816 static ssize_t tracing_splice_read_pipe(struct file *filp,
6818 struct pipe_inode_info *pipe,
6822 struct page *pages_def[PIPE_DEF_BUFFERS];
6823 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6824 struct trace_iterator *iter = filp->private_data;
6825 struct splice_pipe_desc spd = {
6827 .partial = partial_def,
6828 .nr_pages = 0, /* This gets updated below. */
6829 .nr_pages_max = PIPE_DEF_BUFFERS,
6830 .ops = &default_pipe_buf_ops,
6831 .spd_release = tracing_spd_release_pipe,
6837 if (splice_grow_spd(pipe, &spd))
6840 mutex_lock(&iter->mutex);
6842 if (iter->trace->splice_read) {
6843 ret = iter->trace->splice_read(iter, filp,
6844 ppos, pipe, len, flags);
6849 ret = tracing_wait_pipe(filp);
6853 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6858 trace_event_read_lock();
6859 trace_access_lock(iter->cpu_file);
6861 /* Fill as many pages as possible. */
6862 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6863 spd.pages[i] = alloc_page(GFP_KERNEL);
6867 rem = tracing_fill_pipe_page(rem, iter);
6869 /* Copy the data into the page, so we can start over. */
6870 ret = trace_seq_to_buffer(&iter->seq,
6871 page_address(spd.pages[i]),
6872 trace_seq_used(&iter->seq));
6874 __free_page(spd.pages[i]);
6877 spd.partial[i].offset = 0;
6878 spd.partial[i].len = trace_seq_used(&iter->seq);
6880 trace_seq_init(&iter->seq);
6883 trace_access_unlock(iter->cpu_file);
6884 trace_event_read_unlock();
6885 mutex_unlock(&iter->mutex);
6890 ret = splice_to_pipe(pipe, &spd);
6894 splice_shrink_spd(&spd);
6898 mutex_unlock(&iter->mutex);
6903 tracing_entries_read(struct file *filp, char __user *ubuf,
6904 size_t cnt, loff_t *ppos)
6906 struct inode *inode = file_inode(filp);
6907 struct trace_array *tr = inode->i_private;
6908 int cpu = tracing_get_cpu(inode);
6913 mutex_lock(&trace_types_lock);
6915 if (cpu == RING_BUFFER_ALL_CPUS) {
6916 int cpu, buf_size_same;
6921 /* check if all cpu sizes are the same */
6922 for_each_tracing_cpu(cpu) {
6923 /* fill in the size from first enabled cpu */
6925 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6926 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6932 if (buf_size_same) {
6933 if (!ring_buffer_expanded)
6934 r = sprintf(buf, "%lu (expanded: %lu)\n",
6936 trace_buf_size >> 10);
6938 r = sprintf(buf, "%lu\n", size >> 10);
6940 r = sprintf(buf, "X\n");
6942 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6944 mutex_unlock(&trace_types_lock);
6946 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6951 tracing_entries_write(struct file *filp, const char __user *ubuf,
6952 size_t cnt, loff_t *ppos)
6954 struct inode *inode = file_inode(filp);
6955 struct trace_array *tr = inode->i_private;
6959 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6963 /* must have at least 1 entry */
6967 /* value is in KB */
6969 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6979 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6980 size_t cnt, loff_t *ppos)
6982 struct trace_array *tr = filp->private_data;
6985 unsigned long size = 0, expanded_size = 0;
6987 mutex_lock(&trace_types_lock);
6988 for_each_tracing_cpu(cpu) {
6989 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6990 if (!ring_buffer_expanded)
6991 expanded_size += trace_buf_size >> 10;
6993 if (ring_buffer_expanded)
6994 r = sprintf(buf, "%lu\n", size);
6996 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6997 mutex_unlock(&trace_types_lock);
6999 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
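/*
 * Example output (illustrative numbers): with four CPUs this file may
 * read "5632\n" once the buffers are expanded, or
 * "28 (expanded: 5632)\n" while still at the boot-time minimum.
 */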
7003 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7004 size_t cnt, loff_t *ppos)
7007 * There is no need to read what the user has written; this function
7008 * just makes sure that there is no error when "echo" is used
7017 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7019 struct trace_array *tr = inode->i_private;
7021 /* disable tracing ? */
7022 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7023 tracer_tracing_off(tr);
7024 /* resize the ring buffer to 0 */
7025 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7027 trace_array_put(tr);
7033 tracing_mark_write(struct file *filp, const char __user *ubuf,
7034 size_t cnt, loff_t *fpos)
7036 struct trace_array *tr = filp->private_data;
7037 struct ring_buffer_event *event;
7038 enum event_trigger_type tt = ETT_NONE;
7039 struct trace_buffer *buffer;
7040 struct print_entry *entry;
7045 /* Used in tracing_mark_raw_write() as well */
7046 #define FAULTED_STR "<faulted>"
7047 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7049 if (tracing_disabled)
7052 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7055 if (cnt > TRACE_BUF_SIZE)
7056 cnt = TRACE_BUF_SIZE;
7058 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7060 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7062 /* If less than "<faulted>", then make sure we can still add that */
7063 if (cnt < FAULTED_SIZE)
7064 size += FAULTED_SIZE - cnt;
7066 buffer = tr->array_buffer.buffer;
7067 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7069 if (unlikely(!event))
7070 /* Ring buffer disabled, return as if not open for write */
7073 entry = ring_buffer_event_data(event);
7074 entry->ip = _THIS_IP_;
7076 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7078 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7084 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7085 /* do not add \n before testing triggers, but add \0 */
7086 entry->buf[cnt] = '\0';
7087 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7090 if (entry->buf[cnt - 1] != '\n') {
7091 entry->buf[cnt] = '\n';
7092 entry->buf[cnt + 1] = '\0';
7094 entry->buf[cnt] = '\0';
7096 if (static_branch_unlikely(&trace_marker_exports_enabled))
7097 ftrace_exports(event, TRACE_EXPORT_MARKER);
7098 __buffer_unlock_commit(buffer, event);
7101 event_triggers_post_call(tr->trace_marker_file, tt);
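/*
 * Example usage from userspace (see the trace_marker entry in
 * readme_msg above):
 *
 *	# echo hello_world > trace_marker
 *
 * which lands in the ring buffer as a TRACE_PRINT event stamped with
 * _THIS_IP_.
 */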
7109 /* Limit it for now to 3K (including tag) */
7110 #define RAW_DATA_MAX_SIZE (1024*3)
7113 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7114 size_t cnt, loff_t *fpos)
7116 struct trace_array *tr = filp->private_data;
7117 struct ring_buffer_event *event;
7118 struct trace_buffer *buffer;
7119 struct raw_data_entry *entry;
7124 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7126 if (tracing_disabled)
7129 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7132 /* The marker must at least have a tag id */
7133 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7136 if (cnt > TRACE_BUF_SIZE)
7137 cnt = TRACE_BUF_SIZE;
7139 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7141 size = sizeof(*entry) + cnt;
7142 if (cnt < FAULT_SIZE_ID)
7143 size += FAULT_SIZE_ID - cnt;
7145 buffer = tr->array_buffer.buffer;
7146 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7149 /* Ring buffer disabled, return as if not open for write */
7152 entry = ring_buffer_event_data(event);
7154 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7157 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7162 __buffer_unlock_commit(buffer, event);
7170 static int tracing_clock_show(struct seq_file *m, void *v)
7172 struct trace_array *tr = m->private;
7175 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7177 "%s%s%s%s", i ? " " : "",
7178 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7179 i == tr->clock_id ? "]" : "");
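/*
 * The selected clock is bracketed, so the file reads back e.g.
 * "[local] global counter uptime perf ..." (illustrative; the exact
 * list depends on trace_clocks[]).
 */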
7185 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7189 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7190 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7193 if (i == ARRAY_SIZE(trace_clocks))
7196 mutex_lock(&trace_types_lock);
7200 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7203 * New clock may not be consistent with the previous clock.
7204 * Reset the buffer so that it doesn't have incomparable timestamps.
7206 tracing_reset_online_cpus(&tr->array_buffer);
7208 #ifdef CONFIG_TRACER_MAX_TRACE
7209 if (tr->max_buffer.buffer)
7210 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7211 tracing_reset_online_cpus(&tr->max_buffer);
7214 mutex_unlock(&trace_types_lock);
7219 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7220 size_t cnt, loff_t *fpos)
7222 struct seq_file *m = filp->private_data;
7223 struct trace_array *tr = m->private;
7225 const char *clockstr;
7228 if (cnt >= sizeof(buf))
7231 if (copy_from_user(buf, ubuf, cnt))
7236 clockstr = strstrip(buf);
7238 ret = tracing_set_clock(tr, clockstr);
7247 static int tracing_clock_open(struct inode *inode, struct file *file)
7249 struct trace_array *tr = inode->i_private;
7252 ret = tracing_check_open_get_tr(tr);
7256 ret = single_open(file, tracing_clock_show, inode->i_private);
7258 trace_array_put(tr);
7263 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7265 struct trace_array *tr = m->private;
7267 mutex_lock(&trace_types_lock);
7269 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7270 seq_puts(m, "delta [absolute]\n");
7272 seq_puts(m, "[delta] absolute\n");
7274 mutex_unlock(&trace_types_lock);
7279 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7281 struct trace_array *tr = inode->i_private;
7284 ret = tracing_check_open_get_tr(tr);
7288 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7290 trace_array_put(tr);
7295 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7297 if (rbe == this_cpu_read(trace_buffered_event))
7298 return ring_buffer_time_stamp(buffer);
7300 return ring_buffer_event_time_stamp(buffer, rbe);
7304 * Set or disable using the per CPU trace_buffered_event when possible.
7306 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7310 mutex_lock(&trace_types_lock);
7312 if (set && tr->no_filter_buffering_ref++)
7316 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7321 --tr->no_filter_buffering_ref;
7324 mutex_unlock(&trace_types_lock);
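/*
 * Calls with set == true are expected to be paired with a later call with
 * set == false; no_filter_buffering_ref counts the outstanding requests,
 * so several users (e.g. hist triggers that need accurate per-event
 * timestamps) can disable the buffering independently.
 */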
7329 struct ftrace_buffer_info {
7330 struct trace_iterator iter;
7332 unsigned int spare_cpu;
7336 #ifdef CONFIG_TRACER_SNAPSHOT
7337 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7339 struct trace_array *tr = inode->i_private;
7340 struct trace_iterator *iter;
7344 ret = tracing_check_open_get_tr(tr);
7348 if (file->f_mode & FMODE_READ) {
7349 iter = __tracing_open(inode, file, true);
7351 ret = PTR_ERR(iter);
7353 /* Writes still need the seq_file to hold the private data */
7355 m = kzalloc(sizeof(*m), GFP_KERNEL);
7358 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7366 iter->array_buffer = &tr->max_buffer;
7367 iter->cpu_file = tracing_get_cpu(inode);
7369 file->private_data = m;
7373 trace_array_put(tr);
7379 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7382 struct seq_file *m = filp->private_data;
7383 struct trace_iterator *iter = m->private;
7384 struct trace_array *tr = iter->tr;
7388 ret = tracing_update_buffers();
7392 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7396 mutex_lock(&trace_types_lock);
7398 if (tr->current_trace->use_max_tr) {
7403 arch_spin_lock(&tr->max_lock);
7404 if (tr->cond_snapshot)
7406 arch_spin_unlock(&tr->max_lock);
7412 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7416 if (tr->allocated_snapshot)
7420 /* Only allow per-cpu swap if the ring buffer supports it */
7421 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7422 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7427 if (tr->allocated_snapshot)
7428 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7429 &tr->array_buffer, iter->cpu_file);
7431 ret = tracing_alloc_snapshot_instance(tr);
7434 local_irq_disable();
7435 /* Now, we're going to swap */
7436 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7437 update_max_tr(tr, current, smp_processor_id(), NULL);
7439 update_max_tr_single(tr, current, iter->cpu_file);
7443 if (tr->allocated_snapshot) {
7444 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7445 tracing_reset_online_cpus(&tr->max_buffer);
7447 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7457 mutex_unlock(&trace_types_lock);
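/*
 * A sketch of the user-visible semantics of writing to the "snapshot"
 * file, as documented for ftrace:
 *
 *	echo 0 > snapshot	# clear, and free the snapshot buffer
 *	echo 1 > snapshot	# swap: take a snapshot of the live buffer
 *	echo 2 > snapshot	# clear the snapshot without freeing it
 */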
7461 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7463 struct seq_file *m = file->private_data;
7466 ret = tracing_release(inode, file);
7468 if (file->f_mode & FMODE_READ)
7471 /* If write only, the seq_file is just a stub */
7479 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7480 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7481 size_t count, loff_t *ppos);
7482 static int tracing_buffers_release(struct inode *inode, struct file *file);
7483 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7484 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7486 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7488 struct ftrace_buffer_info *info;
7491 /* The following checks for tracefs lockdown */
7492 ret = tracing_buffers_open(inode, filp);
7496 info = filp->private_data;
7498 if (info->iter.trace->use_max_tr) {
7499 tracing_buffers_release(inode, filp);
7503 info->iter.snapshot = true;
7504 info->iter.array_buffer = &info->iter.tr->max_buffer;
7509 #endif /* CONFIG_TRACER_SNAPSHOT */
7512 static const struct file_operations tracing_thresh_fops = {
7513 .open = tracing_open_generic,
7514 .read = tracing_thresh_read,
7515 .write = tracing_thresh_write,
7516 .llseek = generic_file_llseek,
7519 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7520 static const struct file_operations tracing_max_lat_fops = {
7521 .open = tracing_open_generic,
7522 .read = tracing_max_lat_read,
7523 .write = tracing_max_lat_write,
7524 .llseek = generic_file_llseek,
7528 static const struct file_operations set_tracer_fops = {
7529 .open = tracing_open_generic,
7530 .read = tracing_set_trace_read,
7531 .write = tracing_set_trace_write,
7532 .llseek = generic_file_llseek,
7535 static const struct file_operations tracing_pipe_fops = {
7536 .open = tracing_open_pipe,
7537 .poll = tracing_poll_pipe,
7538 .read = tracing_read_pipe,
7539 .splice_read = tracing_splice_read_pipe,
7540 .release = tracing_release_pipe,
7541 .llseek = no_llseek,
7544 static const struct file_operations tracing_entries_fops = {
7545 .open = tracing_open_generic_tr,
7546 .read = tracing_entries_read,
7547 .write = tracing_entries_write,
7548 .llseek = generic_file_llseek,
7549 .release = tracing_release_generic_tr,
7552 static const struct file_operations tracing_total_entries_fops = {
7553 .open = tracing_open_generic_tr,
7554 .read = tracing_total_entries_read,
7555 .llseek = generic_file_llseek,
7556 .release = tracing_release_generic_tr,
7559 static const struct file_operations tracing_free_buffer_fops = {
7560 .open = tracing_open_generic_tr,
7561 .write = tracing_free_buffer_write,
7562 .release = tracing_free_buffer_release,
7565 static const struct file_operations tracing_mark_fops = {
7566 .open = tracing_open_generic_tr,
7567 .write = tracing_mark_write,
7568 .llseek = generic_file_llseek,
7569 .release = tracing_release_generic_tr,
7572 static const struct file_operations tracing_mark_raw_fops = {
7573 .open = tracing_open_generic_tr,
7574 .write = tracing_mark_raw_write,
7575 .llseek = generic_file_llseek,
7576 .release = tracing_release_generic_tr,
7579 static const struct file_operations trace_clock_fops = {
7580 .open = tracing_clock_open,
7582 .llseek = seq_lseek,
7583 .release = tracing_single_release_tr,
7584 .write = tracing_clock_write,
7587 static const struct file_operations trace_time_stamp_mode_fops = {
7588 .open = tracing_time_stamp_mode_open,
7590 .llseek = seq_lseek,
7591 .release = tracing_single_release_tr,
7594 #ifdef CONFIG_TRACER_SNAPSHOT
7595 static const struct file_operations snapshot_fops = {
7596 .open = tracing_snapshot_open,
7598 .write = tracing_snapshot_write,
7599 .llseek = tracing_lseek,
7600 .release = tracing_snapshot_release,
7603 static const struct file_operations snapshot_raw_fops = {
7604 .open = snapshot_raw_open,
7605 .read = tracing_buffers_read,
7606 .release = tracing_buffers_release,
7607 .splice_read = tracing_buffers_splice_read,
7608 .llseek = no_llseek,
7611 #endif /* CONFIG_TRACER_SNAPSHOT */
7614 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7615 * @filp: The active open file structure
7616 * @ubuf: The userspace provided buffer to read the value from
7617 * @cnt: The maximum number of bytes to read
7618 * @ppos: The current "file" position
7620 * This function implements the write interface for a struct trace_min_max_param.
7621 * The filp->private_data must point to a trace_min_max_param structure that
7622 * defines where to write the value, the min and the max acceptable values,
7623 * and a lock to protect the write.
7626 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7628 struct trace_min_max_param *param = filp->private_data;
7635 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7640 mutex_lock(param->lock);
7642 if (param->min && val < *param->min)
7645 if (param->max && val > *param->max)
7652 mutex_unlock(param->lock);
7661 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7662 * @filp: The active open file structure
7663 * @ubuf: The userspace provided buffer to read value into
7664 * @cnt: The maximum number of bytes to read
7665 * @ppos: The current "file" position
7667 * This function implements the read interface for a struct trace_min_max_param.
7668 * The filp->private_data must point to a trace_min_max_param struct with valid
7672 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7674 struct trace_min_max_param *param = filp->private_data;
7675 char buf[U64_STR_SIZE];
7684 if (cnt > sizeof(buf))
7687 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7689 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7692 const struct file_operations trace_min_max_fops = {
7693 .open = tracing_open_generic,
7694 .read = trace_min_max_read,
7695 .write = trace_min_max_write,
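/*
 * An illustrative (hypothetical) user of this interface; only the
 * trace_min_max_param layout and trace_min_max_fops come from this file,
 * the my_* names are made up:
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */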
7698 #define TRACING_LOG_ERRS_MAX 8
7699 #define TRACING_LOG_LOC_MAX 128
7701 #define CMD_PREFIX " Command: "
7704 const char **errs; /* ptr to loc-specific array of err strings */
7705 u8 type; /* index into errs -> specific err string */
7706 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7710 struct tracing_log_err {
7711 struct list_head list;
7712 struct err_info info;
7713 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7714 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7717 static DEFINE_MUTEX(tracing_err_log_lock);
7719 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7721 struct tracing_log_err *err;
7723 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7724 err = kzalloc(sizeof(*err), GFP_KERNEL);
7726 err = ERR_PTR(-ENOMEM);
7727 tr->n_err_log_entries++;
7732 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7733 list_del(&err->list);
7739 * err_pos - find the position of a string within a command for caret placement
7740 * @cmd: The tracing command that caused the error
7741 * @str: The string to position the caret at within @cmd
7743 * Finds the position of the first occurrence of @str within @cmd. The
7744 * return value can be passed to tracing_log_err() for caret placement
7747 * Returns the index within @cmd of the first occurrence of @str or 0
7748 * if @str was not found.
7750 unsigned int err_pos(char *cmd, const char *str)
7754 if (WARN_ON(!strlen(cmd)))
7757 found = strstr(cmd, str);
7765 * tracing_log_err - write an error to the tracing error log
7766 * @tr: The associated trace array for the error (NULL for top level array)
7767 * @loc: A string describing where the error occurred
7768 * @cmd: The tracing command that caused the error
7769 * @errs: The array of loc-specific static error strings
7770 * @type: The index into errs[], which produces the specific static err string
7771 * @pos: The position the caret should be placed in the cmd
7773 * Writes an error into tracing/error_log of the form:
7775 * <loc>: error: <text>, followed by "Command: <cmd>" with a '^' caret at @pos.
7779 * tracing/error_log is a small log file containing the last
7780 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7781 * unless there has been a tracing error, and the error log can be
7782 * cleared and have its memory freed by writing the empty string in
7783 * truncation mode to it, i.e. echo > tracing/error_log.
7785 * NOTE: the @errs array along with the @type param are used to
7786 * produce a static error string - this string is not copied and saved
7787 * when the error is logged - only a pointer to it is saved. See
7788 * existing callers for examples of how static strings are typically
7789 * defined for use with tracing_log_err().
7791 void tracing_log_err(struct trace_array *tr,
7792 const char *loc, const char *cmd,
7793 const char **errs, u8 type, u8 pos)
7795 struct tracing_log_err *err;
7800 mutex_lock(&tracing_err_log_lock);
7801 err = get_tracing_log_err(tr);
7802 if (PTR_ERR(err) == -ENOMEM) {
7803 mutex_unlock(&tracing_err_log_lock);
7807 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7808 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7810 err->info.errs = errs;
7811 err->info.type = type;
7812 err->info.pos = pos;
7813 err->info.ts = local_clock();
7815 list_add_tail(&err->list, &tr->err_log);
7816 mutex_unlock(&tracing_err_log_lock);
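/*
 * A hypothetical caller, for illustration (the error string and the
 * my_cmd/bad_token names are made up; see the hist trigger code for real
 * examples):
 *
 *	static const char *my_errs[] = { "Invalid argument", };
 *
 *	tracing_log_err(tr, "my_loc", my_cmd, my_errs, 0,
 *			err_pos(my_cmd, bad_token));
 */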
7819 static void clear_tracing_err_log(struct trace_array *tr)
7821 struct tracing_log_err *err, *next;
7823 mutex_lock(&tracing_err_log_lock);
7824 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7825 list_del(&err->list);
7829 tr->n_err_log_entries = 0;
7830 mutex_unlock(&tracing_err_log_lock);
7833 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7835 struct trace_array *tr = m->private;
7837 mutex_lock(&tracing_err_log_lock);
7839 return seq_list_start(&tr->err_log, *pos);
7842 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7844 struct trace_array *tr = m->private;
7846 return seq_list_next(v, &tr->err_log, pos);
7849 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7851 mutex_unlock(&tracing_err_log_lock);
7854 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7858 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7860 for (i = 0; i < pos; i++)
7865 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7867 struct tracing_log_err *err = v;
7870 const char *err_text = err->info.errs[err->info.type];
7871 u64 sec = err->info.ts;
7874 nsec = do_div(sec, NSEC_PER_SEC);
7875 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7876 err->loc, err_text);
7877 seq_printf(m, "%s", err->cmd);
7878 tracing_err_log_show_pos(m, err->info.pos);
7884 static const struct seq_operations tracing_err_log_seq_ops = {
7885 .start = tracing_err_log_seq_start,
7886 .next = tracing_err_log_seq_next,
7887 .stop = tracing_err_log_seq_stop,
7888 .show = tracing_err_log_seq_show
7891 static int tracing_err_log_open(struct inode *inode, struct file *file)
7893 struct trace_array *tr = inode->i_private;
7896 ret = tracing_check_open_get_tr(tr);
7900 /* If this file was opened for write, then erase contents */
7901 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7902 clear_tracing_err_log(tr);
7904 if (file->f_mode & FMODE_READ) {
7905 ret = seq_open(file, &tracing_err_log_seq_ops);
7907 struct seq_file *m = file->private_data;
7910 trace_array_put(tr);
7916 static ssize_t tracing_err_log_write(struct file *file,
7917 const char __user *buffer,
7918 size_t count, loff_t *ppos)
7923 static int tracing_err_log_release(struct inode *inode, struct file *file)
7925 struct trace_array *tr = inode->i_private;
7927 trace_array_put(tr);
7929 if (file->f_mode & FMODE_READ)
7930 seq_release(inode, file);
7935 static const struct file_operations tracing_err_log_fops = {
7936 .open = tracing_err_log_open,
7937 .write = tracing_err_log_write,
7939 .llseek = seq_lseek,
7940 .release = tracing_err_log_release,
7943 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7945 struct trace_array *tr = inode->i_private;
7946 struct ftrace_buffer_info *info;
7949 ret = tracing_check_open_get_tr(tr);
7953 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7955 trace_array_put(tr);
7959 mutex_lock(&trace_types_lock);
7962 info->iter.cpu_file = tracing_get_cpu(inode);
7963 info->iter.trace = tr->current_trace;
7964 info->iter.array_buffer = &tr->array_buffer;
7966 /* Force reading ring buffer for first read */
7967 info->read = (unsigned int)-1;
7969 filp->private_data = info;
7973 mutex_unlock(&trace_types_lock);
7975 ret = nonseekable_open(inode, filp);
7977 trace_array_put(tr);
7983 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7985 struct ftrace_buffer_info *info = filp->private_data;
7986 struct trace_iterator *iter = &info->iter;
7988 return trace_poll(iter, filp, poll_table);
7992 tracing_buffers_read(struct file *filp, char __user *ubuf,
7993 size_t count, loff_t *ppos)
7995 struct ftrace_buffer_info *info = filp->private_data;
7996 struct trace_iterator *iter = &info->iter;
8003 #ifdef CONFIG_TRACER_MAX_TRACE
8004 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8009 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8011 if (IS_ERR(info->spare)) {
8012 ret = PTR_ERR(info->spare);
8015 info->spare_cpu = iter->cpu_file;
8021 /* Do we have previous read data to read? */
8022 if (info->read < PAGE_SIZE)
8026 trace_access_lock(iter->cpu_file);
8027 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8031 trace_access_unlock(iter->cpu_file);
8034 if (trace_empty(iter)) {
8035 if ((filp->f_flags & O_NONBLOCK))
8038 ret = wait_on_pipe(iter, 0);
8049 size = PAGE_SIZE - info->read;
8053 ret = copy_to_user(ubuf, info->spare + info->read, size);
8065 static int tracing_buffers_release(struct inode *inode, struct file *file)
8067 struct ftrace_buffer_info *info = file->private_data;
8068 struct trace_iterator *iter = &info->iter;
8070 mutex_lock(&trace_types_lock);
8072 iter->tr->trace_ref--;
8074 __trace_array_put(iter->tr);
8077 ring_buffer_free_read_page(iter->array_buffer->buffer,
8078 info->spare_cpu, info->spare);
8081 mutex_unlock(&trace_types_lock);
8087 struct trace_buffer *buffer;
8090 refcount_t refcount;
8093 static void buffer_ref_release(struct buffer_ref *ref)
8095 if (!refcount_dec_and_test(&ref->refcount))
8097 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8101 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8102 struct pipe_buffer *buf)
8104 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8106 buffer_ref_release(ref);
8110 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8111 struct pipe_buffer *buf)
8113 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8115 if (refcount_read(&ref->refcount) > INT_MAX/2)
8118 refcount_inc(&ref->refcount);
8122 /* Pipe buffer operations for a buffer. */
8123 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8124 .release = buffer_pipe_buf_release,
8125 .get = buffer_pipe_buf_get,
8129 * Callback from splice_to_pipe(), if we need to release some pages
8130 * at the end of the spd in case we errored out while filling the pipe.
8132 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8134 struct buffer_ref *ref =
8135 (struct buffer_ref *)spd->partial[i].private;
8137 buffer_ref_release(ref);
8138 spd->partial[i].private = 0;
8142 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8143 struct pipe_inode_info *pipe, size_t len,
8146 struct ftrace_buffer_info *info = file->private_data;
8147 struct trace_iterator *iter = &info->iter;
8148 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8149 struct page *pages_def[PIPE_DEF_BUFFERS];
8150 struct splice_pipe_desc spd = {
8152 .partial = partial_def,
8153 .nr_pages_max = PIPE_DEF_BUFFERS,
8154 .ops = &buffer_pipe_buf_ops,
8155 .spd_release = buffer_spd_release,
8157 struct buffer_ref *ref;
8161 #ifdef CONFIG_TRACER_MAX_TRACE
8162 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8166 if (*ppos & (PAGE_SIZE - 1))
8169 if (len & (PAGE_SIZE - 1)) {
8170 if (len < PAGE_SIZE)
8175 if (splice_grow_spd(pipe, &spd))
8179 trace_access_lock(iter->cpu_file);
8180 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8182 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8186 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8192 refcount_set(&ref->refcount, 1);
8193 ref->buffer = iter->array_buffer->buffer;
8194 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8195 if (IS_ERR(ref->page)) {
8196 ret = PTR_ERR(ref->page);
8201 ref->cpu = iter->cpu_file;
8203 r = ring_buffer_read_page(ref->buffer, &ref->page,
8204 len, iter->cpu_file, 1);
8206 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8212 page = virt_to_page(ref->page);
8214 spd.pages[i] = page;
8215 spd.partial[i].len = PAGE_SIZE;
8216 spd.partial[i].offset = 0;
8217 spd.partial[i].private = (unsigned long)ref;
8221 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8224 trace_access_unlock(iter->cpu_file);
8227 /* did we read anything? */
8228 if (!spd.nr_pages) {
8233 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8236 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8243 ret = splice_to_pipe(pipe, &spd);
8245 splice_shrink_spd(&spd);
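/*
 * An illustrative user-space sketch of draining a per-cpu buffer through
 * this splice path without copying; the path and sizes are examples only
 * (offsets and lengths must be page aligned, as checked above):
 *
 *	int rfd = open(".../per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	pipe(pfd);
 *	splice(rfd, NULL, pfd[1], NULL, 4096, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, 4096, 0);
 */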
8250 static const struct file_operations tracing_buffers_fops = {
8251 .open = tracing_buffers_open,
8252 .read = tracing_buffers_read,
8253 .poll = tracing_buffers_poll,
8254 .release = tracing_buffers_release,
8255 .splice_read = tracing_buffers_splice_read,
8256 .llseek = no_llseek,
8260 tracing_stats_read(struct file *filp, char __user *ubuf,
8261 size_t count, loff_t *ppos)
8263 struct inode *inode = file_inode(filp);
8264 struct trace_array *tr = inode->i_private;
8265 struct array_buffer *trace_buf = &tr->array_buffer;
8266 int cpu = tracing_get_cpu(inode);
8267 struct trace_seq *s;
8269 unsigned long long t;
8270 unsigned long usec_rem;
8272 s = kmalloc(sizeof(*s), GFP_KERNEL);
8278 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8279 trace_seq_printf(s, "entries: %ld\n", cnt);
8281 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8282 trace_seq_printf(s, "overrun: %ld\n", cnt);
8284 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8285 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8287 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8288 trace_seq_printf(s, "bytes: %ld\n", cnt);
8290 if (trace_clocks[tr->clock_id].in_ns) {
8291 /* local or global for trace_clock */
8292 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8293 usec_rem = do_div(t, USEC_PER_SEC);
8294 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8297 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8298 usec_rem = do_div(t, USEC_PER_SEC);
8299 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8301 /* counter or tsc mode for trace_clock */
8302 trace_seq_printf(s, "oldest event ts: %llu\n",
8303 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8305 trace_seq_printf(s, "now ts: %llu\n",
8306 ring_buffer_time_stamp(trace_buf->buffer));
8309 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8310 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8312 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8313 trace_seq_printf(s, "read events: %ld\n", cnt);
8315 count = simple_read_from_buffer(ubuf, count, ppos,
8316 s->buffer, trace_seq_used(s));
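/*
 * Example of the resulting per-cpu "stats" file contents (the values are
 * illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5208
 *	oldest event ts:  1576.283127
 *	now ts:  1576.283413
 *	dropped events: 0
 *	read events: 0
 */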
8323 static const struct file_operations tracing_stats_fops = {
8324 .open = tracing_open_generic_tr,
8325 .read = tracing_stats_read,
8326 .llseek = generic_file_llseek,
8327 .release = tracing_release_generic_tr,
8330 #ifdef CONFIG_DYNAMIC_FTRACE
8333 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8334 size_t cnt, loff_t *ppos)
8340 /* 256 should be plenty to hold the amount needed */
8341 buf = kmalloc(256, GFP_KERNEL);
8345 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8346 ftrace_update_tot_cnt,
8347 ftrace_number_of_pages,
8348 ftrace_number_of_groups);
8350 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8355 static const struct file_operations tracing_dyn_info_fops = {
8356 .open = tracing_open_generic,
8357 .read = tracing_read_dyn_info,
8358 .llseek = generic_file_llseek,
8360 #endif /* CONFIG_DYNAMIC_FTRACE */
8362 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8364 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8365 struct trace_array *tr, struct ftrace_probe_ops *ops,
8368 tracing_snapshot_instance(tr);
8372 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8373 struct trace_array *tr, struct ftrace_probe_ops *ops,
8376 struct ftrace_func_mapper *mapper = data;
8380 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8390 tracing_snapshot_instance(tr);
8394 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8395 struct ftrace_probe_ops *ops, void *data)
8397 struct ftrace_func_mapper *mapper = data;
8400 seq_printf(m, "%ps:", (void *)ip);
8402 seq_puts(m, "snapshot");
8405 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8408 seq_printf(m, ":count=%ld\n", *count);
8410 seq_puts(m, ":unlimited\n");
8416 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8417 unsigned long ip, void *init_data, void **data)
8419 struct ftrace_func_mapper *mapper = *data;
8422 mapper = allocate_ftrace_func_mapper();
8428 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8432 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8433 unsigned long ip, void *data)
8435 struct ftrace_func_mapper *mapper = data;
8440 free_ftrace_func_mapper(mapper, NULL);
8444 ftrace_func_mapper_remove_ip(mapper, ip);
8447 static struct ftrace_probe_ops snapshot_probe_ops = {
8448 .func = ftrace_snapshot,
8449 .print = ftrace_snapshot_print,
8452 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8453 .func = ftrace_count_snapshot,
8454 .print = ftrace_snapshot_print,
8455 .init = ftrace_snapshot_init,
8456 .free = ftrace_snapshot_free,
8460 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8461 char *glob, char *cmd, char *param, int enable)
8463 struct ftrace_probe_ops *ops;
8464 void *count = (void *)-1;
8471 /* hash funcs only work with set_ftrace_filter */
8475 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8478 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8483 number = strsep(¶m, ":");
8485 if (!strlen(number))
8489 * We use the callback data field (which is a pointer) as our counter.
8492 ret = kstrtoul(number, 0, (unsigned long *)&count);
8497 ret = tracing_alloc_snapshot_instance(tr);
8501 ret = register_ftrace_function_probe(glob, tr, ops, count);
8504 return ret < 0 ? ret : 0;
8507 static struct ftrace_func_command ftrace_snapshot_cmd = {
8509 .func = ftrace_trace_snapshot_callback,
8512 static __init int register_snapshot_cmd(void)
8514 return register_ftrace_command(&ftrace_snapshot_cmd);
8517 static inline __init int register_snapshot_cmd(void) { return 0; }
8518 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
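/*
 * Usage of the "snapshot" function command registered above (a sketch;
 * do_page_fault is just an example function):
 *
 *	echo 'do_page_fault:snapshot' > set_ftrace_filter
 *	echo 'do_page_fault:snapshot:5' > set_ftrace_filter	# limit to 5 hits
 *	echo '!do_page_fault:snapshot' > set_ftrace_filter	# remove the probe
 */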
8520 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8522 if (WARN_ON(!tr->dir))
8523 return ERR_PTR(-ENODEV);
8525 /* Top directory uses NULL as the parent */
8526 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8529 /* All sub buffers have a descriptor */
8533 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8535 struct dentry *d_tracer;
8538 return tr->percpu_dir;
8540 d_tracer = tracing_get_dentry(tr);
8541 if (IS_ERR(d_tracer))
8544 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8546 MEM_FAIL(!tr->percpu_dir,
8547 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8549 return tr->percpu_dir;
8552 static struct dentry *
8553 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8554 void *data, long cpu, const struct file_operations *fops)
8556 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8558 if (ret) /* See tracing_get_cpu() */
8559 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8564 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8566 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8567 struct dentry *d_cpu;
8568 char cpu_dir[30]; /* 30 characters should be more than enough */
8573 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8574 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8576 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8580 /* per cpu trace_pipe */
8581 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8582 tr, cpu, &tracing_pipe_fops);
8585 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8586 tr, cpu, &tracing_fops);
8588 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8589 tr, cpu, &tracing_buffers_fops);
8591 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8592 tr, cpu, &tracing_stats_fops);
8594 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8595 tr, cpu, &tracing_entries_fops);
8597 #ifdef CONFIG_TRACER_SNAPSHOT
8598 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8599 tr, cpu, &snapshot_fops);
8601 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8602 tr, cpu, &snapshot_raw_fops);
8606 #ifdef CONFIG_FTRACE_SELFTEST
8607 /* Let selftest have access to static functions in this file */
8608 #include "trace_selftest.c"
8612 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8615 struct trace_option_dentry *topt = filp->private_data;
8618 if (topt->flags->val & topt->opt->bit)
8623 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8627 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8630 struct trace_option_dentry *topt = filp->private_data;
8634 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8638 if (val != 0 && val != 1)
8641 if (!!(topt->flags->val & topt->opt->bit) != val) {
8642 mutex_lock(&trace_types_lock);
8643 ret = __set_tracer_option(topt->tr, topt->flags,
8645 mutex_unlock(&trace_types_lock);
8656 static const struct file_operations trace_options_fops = {
8657 .open = tracing_open_generic,
8658 .read = trace_options_read,
8659 .write = trace_options_write,
8660 .llseek = generic_file_llseek,
8664 * In order to pass in both the trace_array descriptor and the index
8665 * to the flag that the trace option file represents, the trace_array
8666 * has a character array of trace_flags_index[], which holds the index
8667 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8668 * The address of this character array is passed to the flag option file
8669 * read/write callbacks.
8671 * In order to extract both the index and the trace_array descriptor,
8672 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
8676 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
8679 * Then to get the trace_array descriptor, by subtracting that index
8680 * from the ptr, we get to the start of the index itself.
8682 * ptr - idx == &index[0]
8684 * Then a simple container_of() from that pointer gets us to the
8685 * trace_array descriptor.
8687 static void get_tr_index(void *data, struct trace_array **ptr,
8688 unsigned int *pindex)
8690 *pindex = *(unsigned char *)data;
8692 *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index);
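/*
 * Worked example: the option file for bit 3 is created with
 * data == &tr->trace_flags_index[3], so *pindex reads back 3 and
 * data - 3 == &tr->trace_flags_index[0], from which container_of()
 * recovers the enclosing trace_array.
 */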
8697 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8700 void *tr_index = filp->private_data;
8701 struct trace_array *tr;
8705 get_tr_index(tr_index, &tr, &index);
8707 if (tr->trace_flags & (1 << index))
8712 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8716 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8719 void *tr_index = filp->private_data;
8720 struct trace_array *tr;
8725 get_tr_index(tr_index, &tr, &index);
8727 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8731 if (val != 0 && val != 1)
8734 mutex_lock(&event_mutex);
8735 mutex_lock(&trace_types_lock);
8736 ret = set_tracer_flag(tr, 1 << index, val);
8737 mutex_unlock(&trace_types_lock);
8738 mutex_unlock(&event_mutex);
8748 static const struct file_operations trace_options_core_fops = {
8749 .open = tracing_open_generic,
8750 .read = trace_options_core_read,
8751 .write = trace_options_core_write,
8752 .llseek = generic_file_llseek,
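/*
 * Each file created with these fops lives under the tracefs "options/"
 * directory and accepts "0" or "1", e.g. (sketch):
 *
 *	echo 1 > /sys/kernel/tracing/options/overwrite
 */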
8755 struct dentry *trace_create_file(const char *name,
8757 struct dentry *parent,
8759 const struct file_operations *fops)
8763 ret = tracefs_create_file(name, mode, parent, data, fops);
8765 pr_warn("Could not create tracefs '%s' entry\n", name);
8771 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8773 struct dentry *d_tracer;
8778 d_tracer = tracing_get_dentry(tr);
8779 if (IS_ERR(d_tracer))
8782 tr->options = tracefs_create_dir("options", d_tracer);
8784 pr_warn("Could not create tracefs directory 'options'\n");
8792 create_trace_option_file(struct trace_array *tr,
8793 struct trace_option_dentry *topt,
8794 struct tracer_flags *flags,
8795 struct tracer_opt *opt)
8797 struct dentry *t_options;
8799 t_options = trace_options_init_dentry(tr);
8803 topt->flags = flags;
8807 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8808 t_options, topt, &trace_options_fops);
8813 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8815 struct trace_option_dentry *topts;
8816 struct trace_options *tr_topts;
8817 struct tracer_flags *flags;
8818 struct tracer_opt *opts;
8825 flags = tracer->flags;
8827 if (!flags || !flags->opts)
8831 * If this is an instance, only create flags for tracers
8832 * the instance may have.
8834 if (!trace_ok_for_array(tracer, tr))
8837 for (i = 0; i < tr->nr_topts; i++) {
8838 /* Make sure there's no duplicate flags. */
8839 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8845 for (cnt = 0; opts[cnt].name; cnt++)
8848 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8852 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8859 tr->topts = tr_topts;
8860 tr->topts[tr->nr_topts].tracer = tracer;
8861 tr->topts[tr->nr_topts].topts = topts;
8864 for (cnt = 0; opts[cnt].name; cnt++) {
8865 create_trace_option_file(tr, &topts[cnt], flags,
8867 MEM_FAIL(topts[cnt].entry == NULL,
8868 "Failed to create trace option: %s",
8873 static struct dentry *
8874 create_trace_option_core_file(struct trace_array *tr,
8875 const char *option, long index)
8877 struct dentry *t_options;
8879 t_options = trace_options_init_dentry(tr);
8883 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8884 (void *)&tr->trace_flags_index[index],
8885 &trace_options_core_fops);
8888 static void create_trace_options_dir(struct trace_array *tr)
8890 struct dentry *t_options;
8891 bool top_level = tr == &global_trace;
8894 t_options = trace_options_init_dentry(tr);
8898 for (i = 0; trace_options[i]; i++) {
8900 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8901 create_trace_option_core_file(tr, trace_options[i], i);
8906 rb_simple_read(struct file *filp, char __user *ubuf,
8907 size_t cnt, loff_t *ppos)
8909 struct trace_array *tr = filp->private_data;
8913 r = tracer_tracing_is_on(tr);
8914 r = sprintf(buf, "%d\n", r);
8916 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8920 rb_simple_write(struct file *filp, const char __user *ubuf,
8921 size_t cnt, loff_t *ppos)
8923 struct trace_array *tr = filp->private_data;
8924 struct trace_buffer *buffer = tr->array_buffer.buffer;
8928 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8933 mutex_lock(&trace_types_lock);
8934 if (!!val == tracer_tracing_is_on(tr)) {
8935 val = 0; /* do nothing */
8937 tracer_tracing_on(tr);
8938 if (tr->current_trace->start)
8939 tr->current_trace->start(tr);
8941 tracer_tracing_off(tr);
8942 if (tr->current_trace->stop)
8943 tr->current_trace->stop(tr);
8945 mutex_unlock(&trace_types_lock);
8953 static const struct file_operations rb_simple_fops = {
8954 .open = tracing_open_generic_tr,
8955 .read = rb_simple_read,
8956 .write = rb_simple_write,
8957 .release = tracing_release_generic_tr,
8958 .llseek = default_llseek,
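/*
 * These fops back the per-instance "tracing_on" file (created in
 * init_tracer_tracefs() below). A sketch of user usage:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on	# stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on	# resume recording
 */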
8962 buffer_percent_read(struct file *filp, char __user *ubuf,
8963 size_t cnt, loff_t *ppos)
8965 struct trace_array *tr = filp->private_data;
8969 r = tr->buffer_percent;
8970 r = sprintf(buf, "%d\n", r);
8972 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8976 buffer_percent_write(struct file *filp, const char __user *ubuf,
8977 size_t cnt, loff_t *ppos)
8979 struct trace_array *tr = filp->private_data;
8983 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8993 tr->buffer_percent = val;
9000 static const struct file_operations buffer_percent_fops = {
9001 .open = tracing_open_generic_tr,
9002 .read = buffer_percent_read,
9003 .write = buffer_percent_write,
9004 .release = tracing_release_generic_tr,
9005 .llseek = default_llseek,
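/*
 * "buffer_percent" controls how full a per-cpu buffer must be before
 * blocked readers are woken (see the wait_on_pipe() call in
 * tracing_buffers_splice_read() above): 0 wakes on any data, 100 only
 * once the buffer is full. init_tracer_tracefs() defaults it to 50.
 */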
9008 static struct dentry *trace_instance_dir;
9011 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9014 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9016 enum ring_buffer_flags rb_flags;
9018 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9022 buf->buffer = ring_buffer_alloc(size, rb_flags);
9026 buf->data = alloc_percpu(struct trace_array_cpu);
9028 ring_buffer_free(buf->buffer);
9033 /* Allocate the first page for all buffers */
9034 set_buffer_entries(&tr->array_buffer,
9035 ring_buffer_size(tr->array_buffer.buffer, 0));
9040 static int allocate_trace_buffers(struct trace_array *tr, int size)
9044 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9048 #ifdef CONFIG_TRACER_MAX_TRACE
9049 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9050 allocate_snapshot ? size : 1);
9051 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9052 ring_buffer_free(tr->array_buffer.buffer);
9053 tr->array_buffer.buffer = NULL;
9054 free_percpu(tr->array_buffer.data);
9055 tr->array_buffer.data = NULL;
9058 tr->allocated_snapshot = allocate_snapshot;
9061 * Only the top level trace array gets its snapshot allocated
9062 * from the kernel command line.
9064 allocate_snapshot = false;
9070 static void free_trace_buffer(struct array_buffer *buf)
9073 ring_buffer_free(buf->buffer);
9075 free_percpu(buf->data);
9080 static void free_trace_buffers(struct trace_array *tr)
9085 free_trace_buffer(&tr->array_buffer);
9087 #ifdef CONFIG_TRACER_MAX_TRACE
9088 free_trace_buffer(&tr->max_buffer);
9092 static void init_trace_flags_index(struct trace_array *tr)
9096 /* Used by the trace options files */
9097 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9098 tr->trace_flags_index[i] = i;
9101 static void __update_tracer_options(struct trace_array *tr)
9105 for (t = trace_types; t; t = t->next)
9106 add_tracer_options(tr, t);
9109 static void update_tracer_options(struct trace_array *tr)
9111 mutex_lock(&trace_types_lock);
9112 __update_tracer_options(tr);
9113 mutex_unlock(&trace_types_lock);
9116 /* Must have trace_types_lock held */
9117 struct trace_array *trace_array_find(const char *instance)
9119 struct trace_array *tr, *found = NULL;
9121 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9122 if (tr->name && strcmp(tr->name, instance) == 0) {
9131 struct trace_array *trace_array_find_get(const char *instance)
9133 struct trace_array *tr;
9135 mutex_lock(&trace_types_lock);
9136 tr = trace_array_find(instance);
9139 mutex_unlock(&trace_types_lock);
9144 static int trace_array_create_dir(struct trace_array *tr)
9148 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9152 ret = event_trace_add_tracer(tr->dir, tr);
9154 tracefs_remove(tr->dir);
9158 init_tracer_tracefs(tr, tr->dir);
9159 __update_tracer_options(tr);
9164 static struct trace_array *trace_array_create(const char *name)
9166 struct trace_array *tr;
9170 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9172 return ERR_PTR(ret);
9174 tr->name = kstrdup(name, GFP_KERNEL);
9178 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9181 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9183 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9185 raw_spin_lock_init(&tr->start_lock);
9187 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9189 tr->current_trace = &nop_trace;
9191 INIT_LIST_HEAD(&tr->systems);
9192 INIT_LIST_HEAD(&tr->events);
9193 INIT_LIST_HEAD(&tr->hist_vars);
9194 INIT_LIST_HEAD(&tr->err_log);
9196 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9199 if (ftrace_allocate_ftrace_ops(tr) < 0)
9202 ftrace_init_trace_array(tr);
9204 init_trace_flags_index(tr);
9206 if (trace_instance_dir) {
9207 ret = trace_array_create_dir(tr);
9211 __trace_early_add_events(tr);
9213 list_add(&tr->list, &ftrace_trace_arrays);
9220 ftrace_free_ftrace_ops(tr);
9221 free_trace_buffers(tr);
9222 free_cpumask_var(tr->tracing_cpumask);
9226 return ERR_PTR(ret);
9229 static int instance_mkdir(const char *name)
9231 struct trace_array *tr;
9234 mutex_lock(&event_mutex);
9235 mutex_lock(&trace_types_lock);
9238 if (trace_array_find(name))
9241 tr = trace_array_create(name);
9243 ret = PTR_ERR_OR_ZERO(tr);
9246 mutex_unlock(&trace_types_lock);
9247 mutex_unlock(&event_mutex);
9252 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9253 * @name: The name of the trace array to be looked up/created.
9255 * Returns a pointer to the trace array with the given name, or
9256 * NULL if it cannot be created or looked up.
9258 * NOTE: This function increments the reference counter associated with the
9259 * trace array returned. This makes sure it cannot be freed while in use.
9260 * Use trace_array_put() once the trace array is no longer needed.
9261 * If the trace_array is to be freed, trace_array_destroy() needs to
9262 * be called after the trace_array_put(), or simply let user space delete
9263 * it from the tracefs instances directory. But until the
9264 * trace_array_put() is called, user space cannot delete it.
9267 struct trace_array *trace_array_get_by_name(const char *name)
9269 struct trace_array *tr;
9271 mutex_lock(&event_mutex);
9272 mutex_lock(&trace_types_lock);
9274 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9275 if (tr->name && strcmp(tr->name, name) == 0)
9279 tr = trace_array_create(name);
9287 mutex_unlock(&trace_types_lock);
9288 mutex_unlock(&event_mutex);
9291 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
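/*
 * A sketch of the intended module usage of this API (the instance name
 * is an example):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *	if (tr) {
 *		...				(use the instance)
 *		trace_array_put(tr);
 *		trace_array_destroy(tr);	(only if it should go away)
 *	}
 */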
9293 static int __remove_instance(struct trace_array *tr)
9297 /* Reference counter for a newly created trace array = 1. */
9298 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9301 list_del(&tr->list);
9303 /* Disable all the flags that were enabled coming in */
9304 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9305 if ((1 << i) & ZEROED_TRACE_FLAGS)
9306 set_tracer_flag(tr, 1 << i, 0);
9309 tracing_set_nop(tr);
9310 clear_ftrace_function_probes(tr);
9311 event_trace_del_tracer(tr);
9312 ftrace_clear_pids(tr);
9313 ftrace_destroy_function_files(tr);
9314 tracefs_remove(tr->dir);
9315 free_percpu(tr->last_func_repeats);
9316 free_trace_buffers(tr);
9318 for (i = 0; i < tr->nr_topts; i++) {
9319 kfree(tr->topts[i].topts);
9323 free_cpumask_var(tr->tracing_cpumask);
9330 int trace_array_destroy(struct trace_array *this_tr)
9332 struct trace_array *tr;
9338 mutex_lock(&event_mutex);
9339 mutex_lock(&trace_types_lock);
9343 /* Make sure the trace array exists before destroying it. */
9344 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9345 if (tr == this_tr) {
9346 ret = __remove_instance(tr);
9351 mutex_unlock(&trace_types_lock);
9352 mutex_unlock(&event_mutex);
9356 EXPORT_SYMBOL_GPL(trace_array_destroy);
9358 static int instance_rmdir(const char *name)
9360 struct trace_array *tr;
9363 mutex_lock(&event_mutex);
9364 mutex_lock(&trace_types_lock);
9367 tr = trace_array_find(name);
9369 ret = __remove_instance(tr);
9371 mutex_unlock(&trace_types_lock);
9372 mutex_unlock(&event_mutex);
9377 static __init void create_trace_instances(struct dentry *d_tracer)
9379 struct trace_array *tr;
9381 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9384 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9387 mutex_lock(&event_mutex);
9388 mutex_lock(&trace_types_lock);
9390 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9393 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9394 "Failed to create instance directory\n"))
9398 mutex_unlock(&trace_types_lock);
9399 mutex_unlock(&event_mutex);
9403 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9405 struct trace_event_file *file;
9408 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9409 tr, &show_traces_fops);
9411 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9412 tr, &set_tracer_fops);
9414 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9415 tr, &tracing_cpumask_fops);
9417 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9418 tr, &tracing_iter_fops);
9420 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9423 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9424 tr, &tracing_pipe_fops);
9426 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9427 tr, &tracing_entries_fops);
9429 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9430 tr, &tracing_total_entries_fops);
9432 trace_create_file("free_buffer", 0200, d_tracer,
9433 tr, &tracing_free_buffer_fops);
9435 trace_create_file("trace_marker", 0220, d_tracer,
9436 tr, &tracing_mark_fops);
9438 file = __find_event_file(tr, "ftrace", "print");
9439 if (file && file->dir)
9440 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9441 file, &event_trigger_fops);
9442 tr->trace_marker_file = file;
9444 trace_create_file("trace_marker_raw", 0220, d_tracer,
9445 tr, &tracing_mark_raw_fops);
9447 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9450 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9451 tr, &rb_simple_fops);
9453 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9454 &trace_time_stamp_mode_fops);
9456 tr->buffer_percent = 50;
9458 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9459 tr, &buffer_percent_fops);
9461 create_trace_options_dir(tr);
9463 trace_create_maxlat_file(tr, d_tracer);
9465 if (ftrace_create_function_files(tr, d_tracer))
9466 MEM_FAIL(1, "Could not allocate function filter files");
9468 #ifdef CONFIG_TRACER_SNAPSHOT
9469 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9470 tr, &snapshot_fops);
9473 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9474 tr, &tracing_err_log_fops);
9476 for_each_tracing_cpu(cpu)
9477 tracing_init_tracefs_percpu(tr, cpu);
9479 ftrace_init_tracefs(tr, d_tracer);
9482 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9484 struct vfsmount *mnt;
9485 struct file_system_type *type;
9488 * To maintain backward compatibility for tools that mount
9489 * debugfs to get to the tracing facility, tracefs is automatically
9490 * mounted to the debugfs/tracing directory.
9492 type = get_fs_type("tracefs");
9495 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9496 put_filesystem(type);
9505 * tracing_init_dentry - initialize top level trace array
9507 * This is called when creating files or directories in the tracing
9508 * directory. It is called via fs_initcall() by any of the boot up code
9509 * and returns zero once the top level tracing directory is set up.
9511 int tracing_init_dentry(void)
9513 struct trace_array *tr = &global_trace;
9515 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9516 pr_warn("Tracing disabled due to lockdown\n");
9520 /* The top level trace array uses NULL as parent */
9524 if (WARN_ON(!tracefs_initialized()))
9528 * As there may still be users that expect the tracing
9529 * files to exist in debugfs/tracing, we must automount
9530 * the tracefs file system there, so older tools still
9531 * work with the newer kernel.
9533 tr->dir = debugfs_create_automount("tracing", NULL,
9534 trace_automount, NULL);
9539 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9540 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9542 static struct workqueue_struct *eval_map_wq __initdata;
9543 static struct work_struct eval_map_work __initdata;
9545 static void __init eval_map_work_func(struct work_struct *work)
9549 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9550 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9553 static int __init trace_eval_init(void)
9555 INIT_WORK(&eval_map_work, eval_map_work_func);
9557 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9559 pr_err("Unable to allocate eval_map_wq\n");
9561 eval_map_work_func(&eval_map_work);
9565 queue_work(eval_map_wq, &eval_map_work);
9569 static int __init trace_eval_sync(void)
9571 /* Make sure the eval map updates are finished */
9573 destroy_workqueue(eval_map_wq);
9577 late_initcall_sync(trace_eval_sync);
9580 #ifdef CONFIG_MODULES
9581 static void trace_module_add_evals(struct module *mod)
9583 if (!mod->num_trace_evals)
9587 * Modules with bad taint do not have events created, do
9588 * not bother with enums either.
9590 if (trace_module_has_bad_taint(mod))
9593 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9596 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9597 static void trace_module_remove_evals(struct module *mod)
9599 union trace_eval_map_item *map;
9600 union trace_eval_map_item **last = &trace_eval_maps;
9602 if (!mod->num_trace_evals)
9605 mutex_lock(&trace_eval_mutex);
9607 map = trace_eval_maps;
9610 if (map->head.mod == mod)
9612 map = trace_eval_jmp_to_tail(map);
9613 last = &map->tail.next;
9614 map = map->tail.next;
9619 *last = trace_eval_jmp_to_tail(map)->tail.next;
9622 mutex_unlock(&trace_eval_mutex);
9625 static inline void trace_module_remove_evals(struct module *mod) { }
9626 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9628 static int trace_module_notify(struct notifier_block *self,
9629 unsigned long val, void *data)
9631 struct module *mod = data;
9634 case MODULE_STATE_COMING:
9635 trace_module_add_evals(mod);
9637 case MODULE_STATE_GOING:
9638 trace_module_remove_evals(mod);
9645 static struct notifier_block trace_module_nb = {
9646 .notifier_call = trace_module_notify,
9649 #endif /* CONFIG_MODULES */
9651 static __init int tracer_init_tracefs(void)
9655 trace_access_lock_init();
9657 ret = tracing_init_dentry();
9663 init_tracer_tracefs(&global_trace, NULL);
9664 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9666 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9667 &global_trace, &tracing_thresh_fops);
9669 trace_create_file("README", TRACE_MODE_READ, NULL,
9670 NULL, &tracing_readme_fops);
9672 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9673 NULL, &tracing_saved_cmdlines_fops);
9675 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9676 NULL, &tracing_saved_cmdlines_size_fops);
9678 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9679 NULL, &tracing_saved_tgids_fops);
9683 trace_create_eval_file(NULL);
9685 #ifdef CONFIG_MODULES
9686 register_module_notifier(&trace_module_nb);
9689 #ifdef CONFIG_DYNAMIC_FTRACE
9690 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9691 NULL, &tracing_dyn_info_fops);
9694 create_trace_instances(NULL);
9696 update_tracer_options(&global_trace);
9701 fs_initcall(tracer_init_tracefs);
9703 static int trace_panic_handler(struct notifier_block *this,
9704 unsigned long event, void *unused)
9706 if (ftrace_dump_on_oops)
9707 ftrace_dump(ftrace_dump_on_oops);
9711 static struct notifier_block trace_panic_notifier = {
9712 .notifier_call = trace_panic_handler,
9714 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9717 static int trace_die_handler(struct notifier_block *self,
9723 if (ftrace_dump_on_oops)
9724 ftrace_dump(ftrace_dump_on_oops);
9732 static struct notifier_block trace_die_notifier = {
9733 .notifier_call = trace_die_handler,
9738 * printk is set to a max of 1024; we really don't need it that big.
9739 * Nothing should be printing 1000 characters anyway.
9741 #define TRACE_MAX_PRINT 1000
9744 * Define here KERN_TRACE so that we have one place to modify
9745 * it if we decide to change what log level the ftrace dump should be at.
9748 #define KERN_TRACE KERN_EMERG
9751 trace_printk_seq(struct trace_seq *s)
9753 /* Probably should print a warning here. */
9754 if (s->seq.len >= TRACE_MAX_PRINT)
9755 s->seq.len = TRACE_MAX_PRINT;
9758 * More paranoid code. Although the buffer size is set to
9759 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9760 * an extra layer of protection.
9762 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9763 s->seq.len = s->seq.size - 1;
9765 /* should be zero terminated, but we are paranoid. */
9766 s->buffer[s->seq.len] = 0;
9768 printk(KERN_TRACE "%s", s->buffer);
9773 void trace_init_global_iter(struct trace_iterator *iter)
9775 iter->tr = &global_trace;
9776 iter->trace = iter->tr->current_trace;
9777 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9778 iter->array_buffer = &global_trace.array_buffer;
9780 if (iter->trace && iter->trace->open)
9781 iter->trace->open(iter);
9783 /* Annotate start of buffers if we had overruns */
9784 if (ring_buffer_overruns(iter->array_buffer->buffer))
9785 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9787 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9788 if (trace_clocks[iter->tr->clock_id].in_ns)
9789 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9792 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9794 /* use static because iter can be a bit big for the stack */
9795 static struct trace_iterator iter;
9796 static atomic_t dump_running;
9797 struct trace_array *tr = &global_trace;
9798 unsigned int old_userobj;
9799 unsigned long flags;
9802 /* Only allow one dump user at a time. */
9803 if (atomic_inc_return(&dump_running) != 1) {
9804 atomic_dec(&dump_running);
9809 * Always turn off tracing when we dump.
9810 * We don't need to show trace output of what happens
9811 * between multiple crashes.
9813 * If the user does a sysrq-z, then they can re-enable
9814 * tracing with echo 1 > tracing_on.
9818 local_irq_save(flags);
9820 /* Simulate the iterator */
9821 trace_init_global_iter(&iter);
9822 /* Cannot use kmalloc for iter.temp and iter.fmt */
9823 iter.temp = static_temp_buf;
9824 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9825 iter.fmt = static_fmt_buf;
9826 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9828 for_each_tracing_cpu(cpu) {
9829 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9832 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9834 /* don't look at user memory in panic mode */
9835 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9837 switch (oops_dump_mode) {
9839 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9842 iter.cpu_file = raw_smp_processor_id();
9847 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9848 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9851 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9853 /* Did function tracer already get disabled? */
9854 if (ftrace_is_dead()) {
9855 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9856 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9860 * We need to stop all tracing on all CPUS to read
9861 * the next buffer. This is a bit expensive, but is
9862 * not done often. We read everything we can,
9863 * and then release the locks again.
9866 while (!trace_empty(&iter)) {
9869 printk(KERN_TRACE "---------------------------------\n");
9873 trace_iterator_reset(&iter);
9874 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9876 if (trace_find_next_entry_inc(&iter) != NULL) {
9879 ret = print_trace_line(&iter);
9880 if (ret != TRACE_TYPE_NO_CONSUME)
9881 trace_consume(&iter);
9883 touch_nmi_watchdog();
9885 trace_printk_seq(&iter.seq);
9889 printk(KERN_TRACE " (ftrace buffer empty)\n");
9891 printk(KERN_TRACE "---------------------------------\n");
9894 tr->trace_flags |= old_userobj;
9896 for_each_tracing_cpu(cpu) {
9897 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9899 atomic_dec(&dump_running);
9900 local_irq_restore(flags);
9902 EXPORT_SYMBOL_GPL(ftrace_dump);
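/*
 * A kernel caller can dump the global buffer from its own fatal-error
 * path, e.g. (sketch):
 *
 *	ftrace_dump(DUMP_ALL);		(every CPU's buffer)
 *	ftrace_dump(DUMP_ORIG);		(only the CPU that hit the error)
 */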
9904 #define WRITE_BUFSIZE 4096
9906 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9907 size_t count, loff_t *ppos,
9908 int (*createfn)(const char *))
9910 char *kbuf, *buf, *tmp;
9915 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9919 while (done < count) {
9920 size = count - done;
9922 if (size >= WRITE_BUFSIZE)
9923 size = WRITE_BUFSIZE - 1;
9925 if (copy_from_user(kbuf, buffer + done, size)) {
9932 tmp = strchr(buf, '\n');
9935 size = tmp - buf + 1;
9938 if (done + size < count) {
9941 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9942 pr_warn("Line length is too long: Should be less than %d\n",
9950 /* Remove comments */
9951 tmp = strchr(buf, '#');
9956 ret = createfn(buf);
9961 } while (done < count);
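/*
 * This parser backs e.g. the kprobe_events and uprobe_events write paths:
 * each '\n' terminated line of the user buffer is handed to @createfn,
 * and anything after a '#' on a line is discarded as a comment.
 */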
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size at its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
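
/*
 * Example (sketch): CPUHP_TRACE_RB_PREPARE above is a "multi" hotplug
 * state: the callback is registered once and each ring buffer later
 * attaches itself as an instance (via cpuhp_state_add_instance() in
 * the ring buffer code). The generic pattern, with the hypothetical
 * names MY_CPUHP_STATE, my_prepare() and my_obj:
 *
 *	static int my_prepare(unsigned int cpu, struct hlist_node *node)
 *	{
 *		// allocate per-CPU resources for the object behind @node
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state_multi(MY_CPUHP_STATE, "my/subsys:prepare",
 *				      my_prepare, NULL);
 *	if (!ret)
 *		ret = cpuhp_state_add_instance(MY_CPUHP_STATE, &my_obj.node);
 */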
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
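
/*
 * Example (sketch): the tracepoint_printk_key flipped above is a
 * static branch, so the check it guards costs only a patched nop while
 * disabled. The generic pattern, with hypothetical names:
 *
 *	#include <linux/jump_label.h>
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key))
 *			my_rare_debug_work();	// hypothetical slow path
 *	}
 *
 *	// from slow-path or __init code, as done here:
 *	static_branch_enable(&my_feature_key);
 */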
void __init trace_init(void)
{
	trace_event_init();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer string lives in an init section.
	 * This function is called from a late initcall. If the boot
	 * tracer was never registered, clear the pointer out, to
	 * prevent later registration from accessing the buffer that
	 * is about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}
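
/*
 * default_bootup_tracer is set when a tracer is requested on the
 * kernel command line, for example:
 *
 *	ftrace=function_graph
 *
 * If that tracer never registers (e.g. it was configured out), the
 * message above fires and the stale init-section pointer is dropped.
 */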
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Cannot set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
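
/*
 * Example: the boot-time default chosen above only sets the starting
 * clock; it can be changed at runtime through tracefs (the list of
 * clocks shown depends on the kernel configuration):
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	echo global > /sys/kernel/tracing/trace_clock
 */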
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);