1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
54 #include "trace_output.h"
57 * On boot up, the ring buffer is set to the minimum size, so that
58 * we do not waste memory on systems that are not using tracing.
60 bool ring_buffer_expanded;
63 * We need to change this state when a selftest is running.
64 * A selftest will look into the ring buffer to count the
65 * entries inserted during the selftest, although concurrent
66 * insertions into the ring buffer, such as trace_printk(), could occur
67 * at the same time, giving false positive or negative results.
69 static bool __read_mostly tracing_selftest_running;
72 * If boot-time tracing including tracers/events via kernel cmdline
73 * is running, we do not want to run SELFTEST.
75 bool __read_mostly tracing_selftest_disabled;
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
80 if (!tracing_selftest_disabled) {
81 tracing_selftest_disabled = true;
82 pr_info("Ftrace startup test is disabled due to %s\n", reason);
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
105 * To prevent the comm cache from being overwritten when no
106 * tracing is active, only save the comm when a trace event occurred.
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
112 * Kill all tracing for good (never come back).
113 * It is initialized to 1 but will turn to zero if the initialization
114 * of the tracer is successful. But that is the only place that sets this back to zero.
117 static int tracing_disabled = 1;
119 cpumask_var_t __read_mostly tracing_buffer_mask;
122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125 * is set, then ftrace_dump is called. This will output the contents
126 * of the ftrace buffers to the console. This is very useful for
127 * capturing traces that lead to crashes and outputting them to a
130 * It is off by default, but you can enable it either by specifying
131 * "ftrace_dump_on_oops" on the kernel command line, or by setting
132 * /proc/sys/kernel/ftrace_dump_on_oops.
133 * Set it to 1 to dump the buffers of all CPUs.
134 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
137 enum ftrace_dump_mode ftrace_dump_on_oops;
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
146 unsigned long length;
149 union trace_eval_map_item;
151 struct trace_eval_map_tail {
153 * "end" is first and points to NULL as it must be different
154 * than "mod" or "eval_string"
156 union trace_eval_map_item *next;
157 const char *end; /* points to NULL */
160 static DEFINE_MUTEX(trace_eval_mutex);
163 * The trace_eval_maps are saved in an array with two extra elements,
164 * one at the beginning, and one at the end. The beginning item contains
165 * the count of the saved maps (head.length), and the module they
166 * belong to if not built in (head.mod). The ending item contains a
167 * pointer to the next array of saved eval_map items.
169 union trace_eval_map_item {
170 struct trace_eval_map map;
171 struct trace_eval_map_head head;
172 struct trace_eval_map_tail tail;
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 struct trace_buffer *buffer,
181 unsigned int trace_ctx);
183 #define MAX_TRACER_SIZE 100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
187 static bool allocate_snapshot;
189 static int __init set_cmdline_ftrace(char *str)
191 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
192 default_bootup_tracer = bootup_tracer_buf;
193 /* We are using ftrace early, expand it */
194 ring_buffer_expanded = true;
197 __setup("ftrace=", set_cmdline_ftrace);
199 static int __init set_ftrace_dump_on_oops(char *str)
201 if (*str++ != '=' || !*str || !strcmp("1", str)) {
202 ftrace_dump_on_oops = DUMP_ALL;
206 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
207 ftrace_dump_on_oops = DUMP_ORIG;
213 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
215 static int __init stop_trace_on_warning(char *str)
217 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
218 __disable_trace_on_warning = 1;
221 __setup("traceoff_on_warning", stop_trace_on_warning);
223 static int __init boot_alloc_snapshot(char *str)
225 allocate_snapshot = true;
226 /* We also need the main ring buffer expanded */
227 ring_buffer_expanded = true;
230 __setup("alloc_snapshot", boot_alloc_snapshot);
233 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
235 static int __init set_trace_boot_options(char *str)
237 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
240 __setup("trace_options=", set_trace_boot_options);
242 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
243 static char *trace_boot_clock __initdata;
245 static int __init set_trace_boot_clock(char *str)
247 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
248 trace_boot_clock = trace_boot_clock_buf;
251 __setup("trace_clock=", set_trace_boot_clock);
253 static int __init set_tracepoint_printk(char *str)
255 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
256 tracepoint_printk = 1;
259 __setup("tp_printk", set_tracepoint_printk);
261 static int __init set_tracepoint_printk_stop(char *str)
263 tracepoint_printk_stop_on_boot = true;
266 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
268 unsigned long long ns2usecs(u64 nsec)
276 trace_process_export(struct trace_export *export,
277 struct ring_buffer_event *event, int flag)
279 struct trace_entry *entry;
280 unsigned int size = 0;
282 if (export->flags & flag) {
283 entry = ring_buffer_event_data(event);
284 size = ring_buffer_event_length(event);
285 export->write(export, entry, size);
289 static DEFINE_MUTEX(ftrace_export_lock);
291 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
293 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
294 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
295 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
297 static inline void ftrace_exports_enable(struct trace_export *export)
299 if (export->flags & TRACE_EXPORT_FUNCTION)
300 static_branch_inc(&trace_function_exports_enabled);
302 if (export->flags & TRACE_EXPORT_EVENT)
303 static_branch_inc(&trace_event_exports_enabled);
305 if (export->flags & TRACE_EXPORT_MARKER)
306 static_branch_inc(&trace_marker_exports_enabled);
309 static inline void ftrace_exports_disable(struct trace_export *export)
311 if (export->flags & TRACE_EXPORT_FUNCTION)
312 static_branch_dec(&trace_function_exports_enabled);
314 if (export->flags & TRACE_EXPORT_EVENT)
315 static_branch_dec(&trace_event_exports_enabled);
317 if (export->flags & TRACE_EXPORT_MARKER)
318 static_branch_dec(&trace_marker_exports_enabled);
321 static void ftrace_exports(struct ring_buffer_event *event, int flag)
323 struct trace_export *export;
325 preempt_disable_notrace();
327 export = rcu_dereference_raw_check(ftrace_exports_list);
329 trace_process_export(export, event, flag);
330 export = rcu_dereference_raw_check(export->next);
333 preempt_enable_notrace();
337 add_trace_export(struct trace_export **list, struct trace_export *export)
339 rcu_assign_pointer(export->next, *list);
341 * We are entering export into the list but another
342 * CPU might be walking that list. We need to make sure
343 * the export->next pointer is valid before another CPU sees
344 * the export pointer included into the list.
346 rcu_assign_pointer(*list, export);
350 rm_trace_export(struct trace_export **list, struct trace_export *export)
352 struct trace_export **p;
354 for (p = list; *p != NULL; p = &(*p)->next)
361 rcu_assign_pointer(*p, (*p)->next);
367 add_ftrace_export(struct trace_export **list, struct trace_export *export)
369 ftrace_exports_enable(export);
371 add_trace_export(list, export);
375 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
379 ret = rm_trace_export(list, export);
380 ftrace_exports_disable(export);
385 int register_ftrace_export(struct trace_export *export)
387 if (WARN_ON_ONCE(!export->write))
390 mutex_lock(&ftrace_export_lock);
392 add_ftrace_export(&ftrace_exports_list, export);
394 mutex_unlock(&ftrace_export_lock);
398 EXPORT_SYMBOL_GPL(register_ftrace_export);
400 int unregister_ftrace_export(struct trace_export *export)
404 mutex_lock(&ftrace_export_lock);
406 ret = rm_ftrace_export(&ftrace_exports_list, export);
408 mutex_unlock(&ftrace_export_lock);
412 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
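/*
 * Example usage (an illustrative sketch, not part of this file): a module
 * that wants to mirror trace data to its own sink fills in a trace_export
 * and registers it. The write() callback runs in the tracing fast path and
 * must not sleep.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		my_push_to_sink(entry, size);	// hypothetical sink helper
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	ret = register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */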
414 /* trace_flags holds trace_options default values */
415 #define TRACE_DEFAULT_FLAGS \
416 (FUNCTION_DEFAULT_FLAGS | \
417 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
418 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
419 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
420 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
423 /* trace_options that are only supported by global_trace */
424 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
425 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
427 /* trace_flags that are default zero for instances */
428 #define ZEROED_TRACE_FLAGS \
429 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
432 * The global_trace is the descriptor that holds the top-level tracing
433 * buffers for the live tracing.
435 static struct trace_array global_trace = {
436 .trace_flags = TRACE_DEFAULT_FLAGS,
439 LIST_HEAD(ftrace_trace_arrays);
441 int trace_array_get(struct trace_array *this_tr)
443 struct trace_array *tr;
446 mutex_lock(&trace_types_lock);
447 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
454 mutex_unlock(&trace_types_lock);
459 static void __trace_array_put(struct trace_array *this_tr)
461 WARN_ON(!this_tr->ref);
466 * trace_array_put - Decrement the reference counter for this trace array.
467 * @this_tr : pointer to the trace array
469 * NOTE: Use this when we no longer need the trace array returned by
470 * trace_array_get_by_name(). This ensures the trace array can be later destroyed.
474 void trace_array_put(struct trace_array *this_tr)
479 mutex_lock(&trace_types_lock);
480 __trace_array_put(this_tr);
481 mutex_unlock(&trace_types_lock);
483 EXPORT_SYMBOL_GPL(trace_array_put);
485 int tracing_check_open_get_tr(struct trace_array *tr)
489 ret = security_locked_down(LOCKDOWN_TRACEFS);
493 if (tracing_disabled)
496 if (tr && trace_array_get(tr) < 0)
502 int call_filter_check_discard(struct trace_event_call *call, void *rec,
503 struct trace_buffer *buffer,
504 struct ring_buffer_event *event)
506 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
507 !filter_match_preds(call->filter, rec)) {
508 __trace_event_discard_commit(buffer, event);
516 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
517 * @filtered_pids: The list of pids to check
518 * @search_pid: The PID to find in @filtered_pids
520 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
523 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
525 return trace_pid_list_is_set(filtered_pids, search_pid);
529 * trace_ignore_this_task - should a task be ignored for tracing
530 * @filtered_pids: The list of pids to check
531 * @filtered_no_pids: The list of pids not to be traced
532 * @task: The task that should be ignored if not filtered
534 * Checks if @task should be traced or not from @filtered_pids.
535 * Returns true if @task should *NOT* be traced.
536 * Returns false if @task should be traced.
539 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
540 struct trace_pid_list *filtered_no_pids,
541 struct task_struct *task)
544 * If filtered_no_pids is not empty, and the task's pid is listed
545 * in filtered_no_pids, then return true.
546 * Otherwise, if filtered_pids is empty, that means we can
547 * trace all tasks. If it has content, then only trace pids
548 * within filtered_pids.
551 return (filtered_pids &&
552 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
554 trace_find_filtered_pid(filtered_no_pids, task->pid));
558 * trace_filter_add_remove_task - Add or remove a task from a pid_list
559 * @pid_list: The list to modify
560 * @self: The current task for fork or NULL for exit
561 * @task: The task to add or remove
563 * If adding a task, if @self is defined, the task is only added if @self
564 * is also included in @pid_list. This happens on fork and tasks should
565 * only be added when the parent is listed. If @self is NULL, then the
566 * @task pid will be removed from the list, which would happen on exit
569 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
570 struct task_struct *self,
571 struct task_struct *task)
576 /* For forks, we only add if the forking task is listed */
578 if (!trace_find_filtered_pid(pid_list, self->pid))
582 /* "self" is set for forks, and NULL for exits */
584 trace_pid_list_set(pid_list, task->pid);
586 trace_pid_list_clear(pid_list, task->pid);
590 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
591 * @pid_list: The pid list to show
592 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
593 * @pos: The position of the file
595 * This is used by the seq_file "next" operation to iterate the pids
596 * listed in a trace_pid_list structure.
598 * Returns the pid+1 as we want to display pid of zero, but NULL would
599 * stop the iteration.
601 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
603 long pid = (unsigned long)v;
608 /* pid already is +1 of the actual previous bit */
609 if (trace_pid_list_next(pid_list, pid, &next) < 0)
614 /* Return pid + 1 to allow zero to be represented */
615 return (void *)(pid + 1);
619 * trace_pid_start - Used for seq_file to start reading pid lists
620 * @pid_list: The pid list to show
621 * @pos: The position of the file
623 * This is used by seq_file "start" operation to start the iteration
626 * Returns the pid+1 as we want to display pid of zero, but NULL would
627 * stop the iteration.
629 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
635 if (trace_pid_list_first(pid_list, &first) < 0)
640 /* Return pid + 1 so that zero can be the exit value */
641 for (pid++; pid && l < *pos;
642 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
648 * trace_pid_show - show the current pid in seq_file processing
649 * @m: The seq_file structure to write into
650 * @v: A void pointer of the pid (+1) value to display
652 * Can be directly used by seq_file operations to display the current pid value.
655 int trace_pid_show(struct seq_file *m, void *v)
657 unsigned long pid = (unsigned long)v - 1;
659 seq_printf(m, "%lu\n", pid);
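/*
 * trace_pid_start(), trace_pid_next() and trace_pid_show() are meant to back
 * a seq_file. A minimal sketch of a user (hypothetical names; real callers
 * also take the proper locking/RCU protection when looking up their
 * pid_list):
 *
 *	static void *my_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_pids_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void my_pids_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations my_pids_seq_ops = {
 *		.start	= my_pids_start,
 *		.next	= my_pids_next,
 *		.stop	= my_pids_stop,
 *		.show	= trace_pid_show,
 *	};
 */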
663 /* 128 should be much more than enough */
664 #define PID_BUF_SIZE 127
666 int trace_pid_write(struct trace_pid_list *filtered_pids,
667 struct trace_pid_list **new_pid_list,
668 const char __user *ubuf, size_t cnt)
670 struct trace_pid_list *pid_list;
671 struct trace_parser parser;
679 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
683 * Always recreate a new array. The write is an all-or-nothing
684 * operation. A new array is always created when the user adds
685 * new pids. If the operation fails, then the current list is not modified.
688 pid_list = trace_pid_list_alloc();
690 trace_parser_put(&parser);
695 /* copy the current bits to the new max */
696 ret = trace_pid_list_first(filtered_pids, &pid);
698 trace_pid_list_set(pid_list, pid);
699 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
709 ret = trace_get_user(&parser, ubuf, cnt, &pos);
710 if (ret < 0 || !trace_parser_loaded(&parser))
718 if (kstrtoul(parser.buffer, 0, &val))
723 if (trace_pid_list_set(pid_list, pid) < 0) {
729 trace_parser_clear(&parser);
732 trace_parser_put(&parser);
735 trace_pid_list_free(pid_list);
740 /* Cleared the list of pids */
741 trace_pid_list_free(pid_list);
746 *new_pid_list = pid_list;
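/*
 * Sketch of the write side (hypothetical; the real callers publish the new
 * list with the appropriate locking and free the old one only after an RCU
 * grace period):
 *
 *	static ssize_t my_pid_write(struct file *filp, const char __user *ubuf,
 *				    size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_pid_list *new_list = NULL;
 *		int ret;
 *
 *		ret = trace_pid_write(my_pid_list, &new_list, ubuf, cnt);
 *		if (ret < 0)
 *			return ret;
 *
 *		rcu_assign_pointer(my_pid_list, new_list);
 *		return ret;
 *	}
 */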
751 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
755 /* Early boot up does not have a buffer yet */
757 return trace_clock_local();
759 ts = ring_buffer_time_stamp(buf->buffer);
760 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
765 u64 ftrace_now(int cpu)
767 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
771 * tracing_is_enabled - Show if global_trace has been enabled
773 * Shows if the global trace has been enabled or not. It uses the
774 * mirror flag "buffer_disabled" so that it can be used in fast paths, such as
775 * by the irqsoff tracer. But it may be inaccurate due to races. If you
776 * need to know the accurate state, use tracing_is_on() which is a little
777 * slower, but accurate.
779 int tracing_is_enabled(void)
782 * For quick access (irqsoff uses this in fast path), just
783 * return the mirror variable of the state of the ring buffer.
784 * It's a little racy, but we don't really care.
787 return !global_trace.buffer_disabled;
791 * trace_buf_size is the size in bytes that is allocated
792 * for a buffer. Note, the number of bytes is always rounded to page size.
795 * This number is purposely set to a low number of 16384.
796 * If the dump on oops happens, it will be much appreciated
797 * to not have to wait for all that output. Anyway, this is
798 * boot-time and run-time configurable.
800 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
802 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
804 /* trace_types holds a link list of available tracers. */
805 static struct tracer *trace_types __read_mostly;
808 * trace_types_lock is used to protect the trace_types list.
810 DEFINE_MUTEX(trace_types_lock);
813 * serialize the access of the ring buffer
815 * The ring buffer serializes readers, but that is only low-level protection.
816 * The validity of the events (returned by ring_buffer_peek(), etc.)
817 * is not protected by the ring buffer.
819 * The content of events may become garbage if we allow other processes to
820 * consume these events concurrently:
821 * A) the page of the consumed events may become a normal page
822 * (not a reader page) in the ring buffer, and this page will be rewritten
823 * by the event producer.
824 * B) The page of the consumed events may become a page for splice_read,
825 * and this page will be returned to the system.
827 * These primitives allow multi-process access to different CPU ring buffers.
830 * These primitives don't distinguish read-only and read-consume access.
831 * Multiple read-only accesses are also serialized.
835 static DECLARE_RWSEM(all_cpu_access_lock);
836 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
838 static inline void trace_access_lock(int cpu)
840 if (cpu == RING_BUFFER_ALL_CPUS) {
841 /* gain it for accessing the whole ring buffer. */
842 down_write(&all_cpu_access_lock);
844 /* gain it for accessing a cpu ring buffer. */
846 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
847 down_read(&all_cpu_access_lock);
849 /* Secondly block other access to this @cpu ring buffer. */
850 mutex_lock(&per_cpu(cpu_access_lock, cpu));
854 static inline void trace_access_unlock(int cpu)
856 if (cpu == RING_BUFFER_ALL_CPUS) {
857 up_write(&all_cpu_access_lock);
859 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
860 up_read(&all_cpu_access_lock);
864 static inline void trace_access_lock_init(void)
868 for_each_possible_cpu(cpu)
869 mutex_init(&per_cpu(cpu_access_lock, cpu));
874 static DEFINE_MUTEX(access_lock);
876 static inline void trace_access_lock(int cpu)
879 mutex_lock(&access_lock);
882 static inline void trace_access_unlock(int cpu)
885 mutex_unlock(&access_lock);
888 static inline void trace_access_lock_init(void)
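/*
 * Typical reader-side pattern (sketch): serialize against other consumers
 * before consuming events from one CPU buffer, or pass RING_BUFFER_ALL_CPUS
 * to exclude every per-cpu reader at once.
 *
 *	trace_access_lock(cpu_file);
 *	event = ring_buffer_consume(buffer, cpu_file, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu_file);
 */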
894 #ifdef CONFIG_STACKTRACE
895 static void __ftrace_trace_stack(struct trace_buffer *buffer,
896 unsigned int trace_ctx,
897 int skip, struct pt_regs *regs);
898 static inline void ftrace_trace_stack(struct trace_array *tr,
899 struct trace_buffer *buffer,
900 unsigned int trace_ctx,
901 int skip, struct pt_regs *regs);
904 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
905 unsigned int trace_ctx,
906 int skip, struct pt_regs *regs)
909 static inline void ftrace_trace_stack(struct trace_array *tr,
910 struct trace_buffer *buffer,
911 unsigned long trace_ctx,
912 int skip, struct pt_regs *regs)
918 static __always_inline void
919 trace_event_setup(struct ring_buffer_event *event,
920 int type, unsigned int trace_ctx)
922 struct trace_entry *ent = ring_buffer_event_data(event);
924 tracing_generic_entry_update(ent, type, trace_ctx);
927 static __always_inline struct ring_buffer_event *
928 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
931 unsigned int trace_ctx)
933 struct ring_buffer_event *event;
935 event = ring_buffer_lock_reserve(buffer, len);
937 trace_event_setup(event, type, trace_ctx);
942 void tracer_tracing_on(struct trace_array *tr)
944 if (tr->array_buffer.buffer)
945 ring_buffer_record_on(tr->array_buffer.buffer);
947 * This flag is looked at when buffers haven't been allocated
948 * yet, or by some tracers (like irqsoff), that just want to
949 * know if the ring buffer has been disabled, but it can handle
950 * races where it gets disabled but we still do a record.
951 * As the check is in the fast path of the tracers, it is more
952 * important to be fast than accurate.
954 tr->buffer_disabled = 0;
955 /* Make the flag seen by readers */
960 * tracing_on - enable tracing buffers
962 * This function enables tracing buffers that may have been
963 * disabled with tracing_off.
965 void tracing_on(void)
967 tracer_tracing_on(&global_trace);
969 EXPORT_SYMBOL_GPL(tracing_on);
972 static __always_inline void
973 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
975 __this_cpu_write(trace_taskinfo_save, true);
977 /* If this is the temp buffer, we need to commit fully */
978 if (this_cpu_read(trace_buffered_event) == event) {
979 /* Length is in event->array[0] */
980 ring_buffer_write(buffer, event->array[0], &event->array[1]);
981 /* Release the temp buffer */
982 this_cpu_dec(trace_buffered_event_cnt);
983 /* ring_buffer_unlock_commit() enables preemption */
984 preempt_enable_notrace();
986 ring_buffer_unlock_commit(buffer, event);
990 * __trace_puts - write a constant string into the trace buffer.
991 * @ip: The address of the caller
992 * @str: The constant string to write
993 * @size: The size of the string.
995 int __trace_puts(unsigned long ip, const char *str, int size)
997 struct ring_buffer_event *event;
998 struct trace_buffer *buffer;
999 struct print_entry *entry;
1000 unsigned int trace_ctx;
1003 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1006 if (unlikely(tracing_selftest_running || tracing_disabled))
1009 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1011 trace_ctx = tracing_gen_ctx();
1012 buffer = global_trace.array_buffer.buffer;
1013 ring_buffer_nest_start(buffer);
1014 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1021 entry = ring_buffer_event_data(event);
1024 memcpy(&entry->buf, str, size);
1026 /* Add a newline if necessary */
1027 if (entry->buf[size - 1] != '\n') {
1028 entry->buf[size] = '\n';
1029 entry->buf[size + 1] = '\0';
1031 entry->buf[size] = '\0';
1033 __buffer_unlock_commit(buffer, event);
1034 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1036 ring_buffer_nest_end(buffer);
1039 EXPORT_SYMBOL_GPL(__trace_puts);
1042 * __trace_bputs - write the pointer to a constant string into trace buffer
1043 * @ip: The address of the caller
1044 * @str: The constant string to write to the buffer to
1046 int __trace_bputs(unsigned long ip, const char *str)
1048 struct ring_buffer_event *event;
1049 struct trace_buffer *buffer;
1050 struct bputs_entry *entry;
1051 unsigned int trace_ctx;
1052 int size = sizeof(struct bputs_entry);
1055 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1058 if (unlikely(tracing_selftest_running || tracing_disabled))
1061 trace_ctx = tracing_gen_ctx();
1062 buffer = global_trace.array_buffer.buffer;
1064 ring_buffer_nest_start(buffer);
1065 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1070 entry = ring_buffer_event_data(event);
1074 __buffer_unlock_commit(buffer, event);
1075 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1079 ring_buffer_nest_end(buffer);
1082 EXPORT_SYMBOL_GPL(__trace_bputs);
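/*
 * Note: kernel code normally reaches these two through the trace_puts()
 * macro, which uses __trace_bputs() for compile-time constant strings (only
 * the pointer is recorded) and falls back to __trace_puts() otherwise.
 * A sketch:
 *
 *	trace_puts("hit the slow path\n");
 */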
1084 #ifdef CONFIG_TRACER_SNAPSHOT
1085 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1088 struct tracer *tracer = tr->current_trace;
1089 unsigned long flags;
1092 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1093 internal_trace_puts("*** snapshot is being ignored ***\n");
1097 if (!tr->allocated_snapshot) {
1098 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1099 internal_trace_puts("*** stopping trace here! ***\n");
1104 /* Note, snapshot can not be used when the tracer uses it */
1105 if (tracer->use_max_tr) {
1106 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1107 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1111 local_irq_save(flags);
1112 update_max_tr(tr, current, smp_processor_id(), cond_data);
1113 local_irq_restore(flags);
1116 void tracing_snapshot_instance(struct trace_array *tr)
1118 tracing_snapshot_instance_cond(tr, NULL);
1122 * tracing_snapshot - take a snapshot of the current buffer.
1124 * This causes a swap between the snapshot buffer and the current live
1125 * tracing buffer. You can use this to take snapshots of the live
1126 * trace when some condition is triggered, but continue to trace.
1128 * Note, make sure to allocate the snapshot with either
1129 * a tracing_snapshot_alloc(), or by doing it manually
1130 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1132 * If the snapshot buffer is not allocated, it will stop tracing.
1133 * Basically making a permanent snapshot.
1135 void tracing_snapshot(void)
1137 struct trace_array *tr = &global_trace;
1139 tracing_snapshot_instance(tr);
1141 EXPORT_SYMBOL_GPL(tracing_snapshot);
1144 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1145 * @tr: The tracing instance to snapshot
1146 * @cond_data: The data to be tested conditionally, and possibly saved
1148 * This is the same as tracing_snapshot() except that the snapshot is
1149 * conditional - the snapshot will only happen if the
1150 * cond_snapshot.update() implementation receiving the cond_data
1151 * returns true, which means that the trace array's cond_snapshot
1152 * update() operation used the cond_data to determine whether the
1153 * snapshot should be taken, and if it was, presumably saved it along
1154 * with the snapshot.
1156 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1158 tracing_snapshot_instance_cond(tr, cond_data);
1160 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1163 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1164 * @tr: The tracing instance
1166 * When the user enables a conditional snapshot using
1167 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1168 * with the snapshot. This accessor is used to retrieve it.
1170 * Should not be called from cond_snapshot.update(), since it takes
1171 * the tr->max_lock lock, which the code calling
1172 * cond_snapshot.update() has already done.
1174 * Returns the cond_data associated with the trace array's snapshot.
1176 void *tracing_cond_snapshot_data(struct trace_array *tr)
1178 void *cond_data = NULL;
1180 arch_spin_lock(&tr->max_lock);
1182 if (tr->cond_snapshot)
1183 cond_data = tr->cond_snapshot->cond_data;
1185 arch_spin_unlock(&tr->max_lock);
1189 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1191 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1192 struct array_buffer *size_buf, int cpu_id);
1193 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1195 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1199 if (!tr->allocated_snapshot) {
1201 /* allocate spare buffer */
1202 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1203 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1207 tr->allocated_snapshot = true;
1213 static void free_snapshot(struct trace_array *tr)
1216 * We don't free the ring buffer; instead, we resize it because
1217 * the max_tr ring buffer has some state (e.g. ring->clock) and
1218 * we want to preserve it.
1220 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1221 set_buffer_entries(&tr->max_buffer, 1);
1222 tracing_reset_online_cpus(&tr->max_buffer);
1223 tr->allocated_snapshot = false;
1227 * tracing_alloc_snapshot - allocate snapshot buffer.
1229 * This only allocates the snapshot buffer if it isn't already
1230 * allocated - it doesn't also take a snapshot.
1232 * This is meant to be used in cases where the snapshot buffer needs
1233 * to be set up for events that can't sleep but need to be able to
1234 * trigger a snapshot.
1236 int tracing_alloc_snapshot(void)
1238 struct trace_array *tr = &global_trace;
1241 ret = tracing_alloc_snapshot_instance(tr);
1246 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1249 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1251 * This is similar to tracing_snapshot(), but it will allocate the
1252 * snapshot buffer if it isn't already allocated. Use this only
1253 * where it is safe to sleep, as the allocation may sleep.
1255 * This causes a swap between the snapshot buffer and the current live
1256 * tracing buffer. You can use this to take snapshots of the live
1257 * trace when some condition is triggered, but continue to trace.
1259 void tracing_snapshot_alloc(void)
1263 ret = tracing_alloc_snapshot();
1269 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
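/*
 * Sketch of the intended pairing (illustrative): allocate the spare buffer
 * once from a context that may sleep, then take snapshots from wherever the
 * interesting condition fires.
 *
 *	if (tracing_alloc_snapshot() == 0) {
 *		...
 *		if (saw_the_bug)		// hypothetical condition
 *			tracing_snapshot();
 *	}
 *
 * Use tracing_snapshot_alloc() instead when sleeping is allowed and the
 * allocate-then-snapshot steps should happen together.
 */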
1272 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1273 * @tr: The tracing instance
1274 * @cond_data: User data to associate with the snapshot
1275 * @update: Implementation of the cond_snapshot update function
1277 * Check whether the conditional snapshot for the given instance has
1278 * already been enabled, or if the current tracer is already using a
1279 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1280 * save the cond_data and update function inside.
1282 * Returns 0 if successful, error otherwise.
1284 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1285 cond_update_fn_t update)
1287 struct cond_snapshot *cond_snapshot;
1290 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1294 cond_snapshot->cond_data = cond_data;
1295 cond_snapshot->update = update;
1297 mutex_lock(&trace_types_lock);
1299 ret = tracing_alloc_snapshot_instance(tr);
1303 if (tr->current_trace->use_max_tr) {
1309 * The cond_snapshot can only change to NULL without the
1310 * trace_types_lock. We don't care if we race with it going
1311 * to NULL, but we want to make sure that it's not set to
1312 * something other than NULL when we get here, which we can
1313 * do safely with only holding the trace_types_lock and not
1314 * having to take the max_lock.
1316 if (tr->cond_snapshot) {
1321 arch_spin_lock(&tr->max_lock);
1322 tr->cond_snapshot = cond_snapshot;
1323 arch_spin_unlock(&tr->max_lock);
1325 mutex_unlock(&trace_types_lock);
1330 mutex_unlock(&trace_types_lock);
1331 kfree(cond_snapshot);
1334 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
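/*
 * Sketch of a conditional snapshot user (hypothetical): the update()
 * callback decides whether a given trigger should really swap the buffers.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_ctx *ctx = cond_data;
 *
 *		return ctx->latency > ctx->threshold;
 *	}
 *
 *	ret = tracing_snapshot_cond_enable(tr, &ctx, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &ctx);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */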
1337 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1338 * @tr: The tracing instance
1340 * Check whether the conditional snapshot for the given instance is
1341 * enabled; if so, free the cond_snapshot associated with it,
1342 * otherwise return -EINVAL.
1344 * Returns 0 if successful, error otherwise.
1346 int tracing_snapshot_cond_disable(struct trace_array *tr)
1350 arch_spin_lock(&tr->max_lock);
1352 if (!tr->cond_snapshot)
1355 kfree(tr->cond_snapshot);
1356 tr->cond_snapshot = NULL;
1359 arch_spin_unlock(&tr->max_lock);
1363 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1365 void tracing_snapshot(void)
1367 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1369 EXPORT_SYMBOL_GPL(tracing_snapshot);
1370 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1372 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1374 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1375 int tracing_alloc_snapshot(void)
1377 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1380 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1381 void tracing_snapshot_alloc(void)
1386 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1387 void *tracing_cond_snapshot_data(struct trace_array *tr)
1391 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1392 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1397 int tracing_snapshot_cond_disable(struct trace_array *tr)
1401 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1402 #endif /* CONFIG_TRACER_SNAPSHOT */
1404 void tracer_tracing_off(struct trace_array *tr)
1406 if (tr->array_buffer.buffer)
1407 ring_buffer_record_off(tr->array_buffer.buffer);
1409 * This flag is looked at when buffers haven't been allocated
1410 * yet, or by some tracers (like irqsoff), that just want to
1411 * know if the ring buffer has been disabled, but it can handle
1412 * races where it gets disabled but we still do a record.
1413 * As the check is in the fast path of the tracers, it is more
1414 * important to be fast than accurate.
1416 tr->buffer_disabled = 1;
1417 /* Make the flag seen by readers */
1422 * tracing_off - turn off tracing buffers
1424 * This function stops the tracing buffers from recording data.
1425 * It does not disable any overhead the tracers themselves may
1426 * be causing. This function simply causes all recording to
1427 * the ring buffers to fail.
1429 void tracing_off(void)
1431 tracer_tracing_off(&global_trace);
1433 EXPORT_SYMBOL_GPL(tracing_off);
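/*
 * Typical debugging pattern (sketch): run tracing as a flight recorder and
 * freeze it the moment the bug is detected, so the events leading up to it
 * stay in the ring buffer.
 *
 *	if (data_is_corrupted(obj)) {		// hypothetical check
 *		trace_printk("corruption in %px\n", obj);
 *		tracing_off();
 *	}
 */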
1435 void disable_trace_on_warning(void)
1437 if (__disable_trace_on_warning) {
1438 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1439 "Disabling tracing due to warning\n");
1445 * tracer_tracing_is_on - show real state of ring buffer enabled
1446 * @tr : the trace array to know if ring buffer is enabled
1448 * Shows real state of the ring buffer if it is enabled or not.
1450 bool tracer_tracing_is_on(struct trace_array *tr)
1452 if (tr->array_buffer.buffer)
1453 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1454 return !tr->buffer_disabled;
1458 * tracing_is_on - show state of ring buffers enabled
1460 int tracing_is_on(void)
1462 return tracer_tracing_is_on(&global_trace);
1464 EXPORT_SYMBOL_GPL(tracing_is_on);
1466 static int __init set_buf_size(char *str)
1468 unsigned long buf_size;
1472 buf_size = memparse(str, &str);
1473 /* nr_entries can not be zero */
1476 trace_buf_size = buf_size;
1479 __setup("trace_buf_size=", set_buf_size);
1481 static int __init set_tracing_thresh(char *str)
1483 unsigned long threshold;
1488 ret = kstrtoul(str, 0, &threshold);
1491 tracing_thresh = threshold * 1000;
1494 __setup("tracing_thresh=", set_tracing_thresh);
1496 unsigned long nsecs_to_usecs(unsigned long nsecs)
1498 return nsecs / 1000;
1502 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1503 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1504 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1505 * of strings in the order that the evals (enum) were defined.
1510 /* These must match the bit positions in trace_iterator_flags */
1511 static const char *trace_options[] = {
1519 int in_ns; /* is this clock in nanoseconds? */
1520 } trace_clocks[] = {
1521 { trace_clock_local, "local", 1 },
1522 { trace_clock_global, "global", 1 },
1523 { trace_clock_counter, "counter", 0 },
1524 { trace_clock_jiffies, "uptime", 0 },
1525 { trace_clock, "perf", 1 },
1526 { ktime_get_mono_fast_ns, "mono", 1 },
1527 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1528 { ktime_get_boot_fast_ns, "boot", 1 },
1532 bool trace_clock_in_ns(struct trace_array *tr)
1534 if (trace_clocks[tr->clock_id].in_ns)
1541 * trace_parser_get_init - gets the buffer for trace parser
1543 int trace_parser_get_init(struct trace_parser *parser, int size)
1545 memset(parser, 0, sizeof(*parser));
1547 parser->buffer = kmalloc(size, GFP_KERNEL);
1548 if (!parser->buffer)
1551 parser->size = size;
1556 * trace_parser_put - frees the buffer for trace parser
1558 void trace_parser_put(struct trace_parser *parser)
1560 kfree(parser->buffer);
1561 parser->buffer = NULL;
1565 * trace_get_user - reads the user input string separated by space
1566 * (matched by isspace(ch))
1568 * For each string found the 'struct trace_parser' is updated,
1569 * and the function returns.
1571 * Returns number of bytes read.
1573 * See kernel/trace/trace.h for 'struct trace_parser' details.
1575 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1576 size_t cnt, loff_t *ppos)
1583 trace_parser_clear(parser);
1585 ret = get_user(ch, ubuf++);
1593 * If the parser is not finished with the last write,
1594 * continue reading the user input without skipping spaces.
1596 if (!parser->cont) {
1597 /* skip white space */
1598 while (cnt && isspace(ch)) {
1599 ret = get_user(ch, ubuf++);
1608 /* only spaces were written */
1609 if (isspace(ch) || !ch) {
1616 /* read the non-space input */
1617 while (cnt && !isspace(ch) && ch) {
1618 if (parser->idx < parser->size - 1)
1619 parser->buffer[parser->idx++] = ch;
1624 ret = get_user(ch, ubuf++);
1631 /* We either got finished input or we have to wait for another call. */
1632 if (isspace(ch) || !ch) {
1633 parser->buffer[parser->idx] = 0;
1634 parser->cont = false;
1635 } else if (parser->idx < parser->size - 1) {
1636 parser->cont = true;
1637 parser->buffer[parser->idx++] = ch;
1638 /* Make sure the parsed string always terminates with '\0'. */
1639 parser->buffer[parser->idx] = 0;
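/*
 * Sketch of how a write() handler typically drives the parser (hypothetical;
 * real users loop until @cnt is exhausted and act on every token):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, 128))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		my_handle_token(parser.buffer);	// hypothetical consumer
 *
 *	trace_parser_put(&parser);
 */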
1652 /* TODO add a seq_buf_to_buffer() */
1653 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1657 if (trace_seq_used(s) <= s->seq.readpos)
1660 len = trace_seq_used(s) - s->seq.readpos;
1663 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1665 s->seq.readpos += cnt;
1669 unsigned long __read_mostly tracing_thresh;
1670 static const struct file_operations tracing_max_lat_fops;
1672 #ifdef LATENCY_FS_NOTIFY
1674 static struct workqueue_struct *fsnotify_wq;
1676 static void latency_fsnotify_workfn(struct work_struct *work)
1678 struct trace_array *tr = container_of(work, struct trace_array,
1680 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1683 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1685 struct trace_array *tr = container_of(iwork, struct trace_array,
1687 queue_work(fsnotify_wq, &tr->fsnotify_work);
1690 static void trace_create_maxlat_file(struct trace_array *tr,
1691 struct dentry *d_tracer)
1693 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1694 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1695 tr->d_max_latency = trace_create_file("tracing_max_latency",
1697 d_tracer, &tr->max_latency,
1698 &tracing_max_lat_fops);
1701 __init static int latency_fsnotify_init(void)
1703 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1704 WQ_UNBOUND | WQ_HIGHPRI, 0);
1706 pr_err("Unable to allocate tr_max_lat_wq\n");
1712 late_initcall_sync(latency_fsnotify_init);
1714 void latency_fsnotify(struct trace_array *tr)
1719 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1720 * possible that we are called from __schedule() or do_idle(), which
1721 * could cause a deadlock.
1723 irq_work_queue(&tr->fsnotify_irqwork);
1726 #elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1727 || defined(CONFIG_OSNOISE_TRACER)
1729 #define trace_create_maxlat_file(tr, d_tracer) \
1730 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1731 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1734 #define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
1737 #ifdef CONFIG_TRACER_MAX_TRACE
1739 * Copy the new maximum trace into the separate maximum-trace
1740 * structure. (this way the maximum trace is permanently saved,
1741 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1744 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1746 struct array_buffer *trace_buf = &tr->array_buffer;
1747 struct array_buffer *max_buf = &tr->max_buffer;
1748 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1749 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1752 max_buf->time_start = data->preempt_timestamp;
1754 max_data->saved_latency = tr->max_latency;
1755 max_data->critical_start = data->critical_start;
1756 max_data->critical_end = data->critical_end;
1758 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1759 max_data->pid = tsk->pid;
1761 * If tsk == current, then use current_uid(), as that does not use
1762 * RCU. The irq tracer can be called out of RCU scope.
1765 max_data->uid = current_uid();
1767 max_data->uid = task_uid(tsk);
1769 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1770 max_data->policy = tsk->policy;
1771 max_data->rt_priority = tsk->rt_priority;
1773 /* record this tasks comm */
1774 tracing_record_cmdline(tsk);
1775 latency_fsnotify(tr);
1779 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1781 * @tsk: the task with the latency
1782 * @cpu: The cpu that initiated the trace.
1783 * @cond_data: User data associated with a conditional snapshot
1785 * Flip the buffers between the @tr and the max_tr and record information
1786 * about which task was the cause of this latency.
1789 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1795 WARN_ON_ONCE(!irqs_disabled());
1797 if (!tr->allocated_snapshot) {
1798 /* Only the nop tracer should hit this when disabling */
1799 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1803 arch_spin_lock(&tr->max_lock);
1805 /* Inherit the recordable setting from array_buffer */
1806 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1807 ring_buffer_record_on(tr->max_buffer.buffer);
1809 ring_buffer_record_off(tr->max_buffer.buffer);
1811 #ifdef CONFIG_TRACER_SNAPSHOT
1812 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1815 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1817 __update_max_tr(tr, tsk, cpu);
1820 arch_spin_unlock(&tr->max_lock);
1824 * update_max_tr_single - only copy one trace over, and reset the rest
1826 * @tsk: task with the latency
1827 * @cpu: the cpu of the buffer to copy.
1829 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1832 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1839 WARN_ON_ONCE(!irqs_disabled());
1840 if (!tr->allocated_snapshot) {
1841 /* Only the nop tracer should hit this when disabling */
1842 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1846 arch_spin_lock(&tr->max_lock);
1848 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1850 if (ret == -EBUSY) {
1852 * We failed to swap the buffer due to a commit taking
1853 * place on this CPU. We fail to record, but we reset
1854 * the max trace buffer (no one writes directly to it)
1855 * and flag that it failed.
1857 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1858 "Failed to swap buffers due to commit in progress\n");
1861 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1863 __update_max_tr(tr, tsk, cpu);
1864 arch_spin_unlock(&tr->max_lock);
1866 #endif /* CONFIG_TRACER_MAX_TRACE */
1868 static int wait_on_pipe(struct trace_iterator *iter, int full)
1870 /* Iterators are static; they should be either filled or empty */
1871 if (trace_buffer_iter(iter, iter->cpu_file))
1874 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1878 #ifdef CONFIG_FTRACE_STARTUP_TEST
1879 static bool selftests_can_run;
1881 struct trace_selftests {
1882 struct list_head list;
1883 struct tracer *type;
1886 static LIST_HEAD(postponed_selftests);
1888 static int save_selftest(struct tracer *type)
1890 struct trace_selftests *selftest;
1892 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1896 selftest->type = type;
1897 list_add(&selftest->list, &postponed_selftests);
1901 static int run_tracer_selftest(struct tracer *type)
1903 struct trace_array *tr = &global_trace;
1904 struct tracer *saved_tracer = tr->current_trace;
1907 if (!type->selftest || tracing_selftest_disabled)
1911 * If a tracer registers early in boot up (before scheduling is
1912 * initialized and such), then do not run its selftests yet.
1913 * Instead, run it a little later in the boot process.
1915 if (!selftests_can_run)
1916 return save_selftest(type);
1918 if (!tracing_is_on()) {
1919 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1925 * Run a selftest on this tracer.
1926 * Here we reset the trace buffer, and set the current
1927 * tracer to be this tracer. The tracer can then run some
1928 * internal tracing to verify that everything is in order.
1929 * If we fail, we do not register this tracer.
1931 tracing_reset_online_cpus(&tr->array_buffer);
1933 tr->current_trace = type;
1935 #ifdef CONFIG_TRACER_MAX_TRACE
1936 if (type->use_max_tr) {
1937 /* If we expanded the buffers, make sure the max is expanded too */
1938 if (ring_buffer_expanded)
1939 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1940 RING_BUFFER_ALL_CPUS);
1941 tr->allocated_snapshot = true;
1945 /* the test is responsible for initializing and enabling */
1946 pr_info("Testing tracer %s: ", type->name);
1947 ret = type->selftest(type, tr);
1948 /* the test is responsible for resetting too */
1949 tr->current_trace = saved_tracer;
1951 printk(KERN_CONT "FAILED!\n");
1952 /* Add the warning after printing 'FAILED' */
1956 /* Only reset on passing, to avoid touching corrupted buffers */
1957 tracing_reset_online_cpus(&tr->array_buffer);
1959 #ifdef CONFIG_TRACER_MAX_TRACE
1960 if (type->use_max_tr) {
1961 tr->allocated_snapshot = false;
1963 /* Shrink the max buffer again */
1964 if (ring_buffer_expanded)
1965 ring_buffer_resize(tr->max_buffer.buffer, 1,
1966 RING_BUFFER_ALL_CPUS);
1970 printk(KERN_CONT "PASSED\n");
1974 static __init int init_trace_selftests(void)
1976 struct trace_selftests *p, *n;
1977 struct tracer *t, **last;
1980 selftests_can_run = true;
1982 mutex_lock(&trace_types_lock);
1984 if (list_empty(&postponed_selftests))
1987 pr_info("Running postponed tracer tests:\n");
1989 tracing_selftest_running = true;
1990 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1991 /* This loop can take minutes when sanitizers are enabled, so
1992 * let's make sure we allow RCU processing.
1995 ret = run_tracer_selftest(p->type);
1996 /* If the test fails, then warn and remove from available_tracers */
1998 WARN(1, "tracer: %s failed selftest, disabling\n",
2000 last = &trace_types;
2001 for (t = trace_types; t; t = t->next) {
2012 tracing_selftest_running = false;
2015 mutex_unlock(&trace_types_lock);
2019 core_initcall(init_trace_selftests);
2021 static inline int run_tracer_selftest(struct tracer *type)
2025 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2027 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2029 static void __init apply_trace_boot_options(void);
2032 * register_tracer - register a tracer with the ftrace system.
2033 * @type: the plugin for the tracer
2035 * Register a new plugin tracer.
2037 int __init register_tracer(struct tracer *type)
2043 pr_info("Tracer must have a name\n");
2047 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2048 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2052 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2053 pr_warn("Can not register tracer %s due to lockdown\n",
2058 mutex_lock(&trace_types_lock);
2060 tracing_selftest_running = true;
2062 for (t = trace_types; t; t = t->next) {
2063 if (strcmp(type->name, t->name) == 0) {
2065 pr_info("Tracer %s already registered\n",
2072 if (!type->set_flag)
2073 type->set_flag = &dummy_set_flag;
2075 /* allocate a dummy tracer_flags */
2076 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2081 type->flags->val = 0;
2082 type->flags->opts = dummy_tracer_opt;
2084 if (!type->flags->opts)
2085 type->flags->opts = dummy_tracer_opt;
2087 /* store the tracer for __set_tracer_option */
2088 type->flags->trace = type;
2090 ret = run_tracer_selftest(type);
2094 type->next = trace_types;
2096 add_tracer_options(&global_trace, type);
2099 tracing_selftest_running = false;
2100 mutex_unlock(&trace_types_lock);
2102 if (ret || !default_bootup_tracer)
2105 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2108 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2109 /* Do we want this tracer to start on bootup? */
2110 tracing_set_tracer(&global_trace, type->name);
2111 default_bootup_tracer = NULL;
2113 apply_trace_boot_options();
2115 /* disable other selftests, since this will break it. */
2116 disable_tracing_selftest("running a tracer");
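/*
 * Minimal shape of a tracer registration (sketch; real tracers also fill in
 * print_line, flags, selftest, etc., and register from an __init call):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */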
2122 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2124 struct trace_buffer *buffer = buf->buffer;
2129 ring_buffer_record_disable(buffer);
2131 /* Make sure all commits have finished */
2133 ring_buffer_reset_cpu(buffer, cpu);
2135 ring_buffer_record_enable(buffer);
2138 void tracing_reset_online_cpus(struct array_buffer *buf)
2140 struct trace_buffer *buffer = buf->buffer;
2145 ring_buffer_record_disable(buffer);
2147 /* Make sure all commits have finished */
2150 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2152 ring_buffer_reset_online_cpus(buffer);
2154 ring_buffer_record_enable(buffer);
2157 /* Must have trace_types_lock held */
2158 void tracing_reset_all_online_cpus(void)
2160 struct trace_array *tr;
2162 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2163 if (!tr->clear_trace)
2165 tr->clear_trace = false;
2166 tracing_reset_online_cpus(&tr->array_buffer);
2167 #ifdef CONFIG_TRACER_MAX_TRACE
2168 tracing_reset_online_cpus(&tr->max_buffer);
2174 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2175 * is the tgid last observed corresponding to pid=i.
2177 static int *tgid_map;
2179 /* The maximum valid index into tgid_map. */
2180 static size_t tgid_map_max;
2182 #define SAVED_CMDLINES_DEFAULT 128
2183 #define NO_CMDLINE_MAP UINT_MAX
2184 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2185 struct saved_cmdlines_buffer {
2186 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2187 unsigned *map_cmdline_to_pid;
2188 unsigned cmdline_num;
2190 char *saved_cmdlines;
2192 static struct saved_cmdlines_buffer *savedcmd;
2194 static inline char *get_saved_cmdlines(int idx)
2196 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2199 static inline void set_cmdline(int idx, const char *cmdline)
2201 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2204 static int allocate_cmdlines_buffer(unsigned int val,
2205 struct saved_cmdlines_buffer *s)
2207 s->map_cmdline_to_pid = kmalloc_array(val,
2208 sizeof(*s->map_cmdline_to_pid),
2210 if (!s->map_cmdline_to_pid)
2213 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2214 if (!s->saved_cmdlines) {
2215 kfree(s->map_cmdline_to_pid);
2220 s->cmdline_num = val;
2221 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2222 sizeof(s->map_pid_to_cmdline));
2223 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2224 val * sizeof(*s->map_cmdline_to_pid));
2229 static int trace_create_savedcmd(void)
2233 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2237 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2247 int is_tracing_stopped(void)
2249 return global_trace.stop_count;
2253 * tracing_start - quick start of the tracer
2255 * If tracing is enabled but was stopped by tracing_stop,
2256 * this will start the tracer back up.
2258 void tracing_start(void)
2260 struct trace_buffer *buffer;
2261 unsigned long flags;
2263 if (tracing_disabled)
2266 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2267 if (--global_trace.stop_count) {
2268 if (global_trace.stop_count < 0) {
2269 /* Someone screwed up their debugging */
2271 global_trace.stop_count = 0;
2276 /* Prevent the buffers from switching */
2277 arch_spin_lock(&global_trace.max_lock);
2279 buffer = global_trace.array_buffer.buffer;
2281 ring_buffer_record_enable(buffer);
2283 #ifdef CONFIG_TRACER_MAX_TRACE
2284 buffer = global_trace.max_buffer.buffer;
2286 ring_buffer_record_enable(buffer);
2289 arch_spin_unlock(&global_trace.max_lock);
2292 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2295 static void tracing_start_tr(struct trace_array *tr)
2297 struct trace_buffer *buffer;
2298 unsigned long flags;
2300 if (tracing_disabled)
2303 /* If global, we need to also start the max tracer */
2304 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2305 return tracing_start();
2307 raw_spin_lock_irqsave(&tr->start_lock, flags);
2309 if (--tr->stop_count) {
2310 if (tr->stop_count < 0) {
2311 /* Someone screwed up their debugging */
2318 buffer = tr->array_buffer.buffer;
2320 ring_buffer_record_enable(buffer);
2323 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2327 * tracing_stop - quick stop of the tracer
2329 * Lightweight way to stop tracing. Use in conjunction with tracing_start().
2332 void tracing_stop(void)
2334 struct trace_buffer *buffer;
2335 unsigned long flags;
2337 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2338 if (global_trace.stop_count++)
2341 /* Prevent the buffers from switching */
2342 arch_spin_lock(&global_trace.max_lock);
2344 buffer = global_trace.array_buffer.buffer;
2346 ring_buffer_record_disable(buffer);
2348 #ifdef CONFIG_TRACER_MAX_TRACE
2349 buffer = global_trace.max_buffer.buffer;
2351 ring_buffer_record_disable(buffer);
2354 arch_spin_unlock(&global_trace.max_lock);
2357 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2360 static void tracing_stop_tr(struct trace_array *tr)
2362 struct trace_buffer *buffer;
2363 unsigned long flags;
2365 /* If global, we need to also stop the max tracer */
2366 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2367 return tracing_stop();
2369 raw_spin_lock_irqsave(&tr->start_lock, flags);
2370 if (tr->stop_count++)
2373 buffer = tr->array_buffer.buffer;
2375 ring_buffer_record_disable(buffer);
2378 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2381 static int trace_save_cmdline(struct task_struct *tsk)
2385 /* treat recording of idle task as a success */
2389 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2392 * It's not the end of the world if we don't get
2393 * the lock, but we also don't want to spin
2394 * nor do we want to disable interrupts,
2395 * so if we miss here, then better luck next time.
2397 if (!arch_spin_trylock(&trace_cmdline_lock))
2400 idx = savedcmd->map_pid_to_cmdline[tpid];
2401 if (idx == NO_CMDLINE_MAP) {
2402 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2404 savedcmd->map_pid_to_cmdline[tpid] = idx;
2405 savedcmd->cmdline_idx = idx;
2408 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2409 set_cmdline(idx, tsk->comm);
2411 arch_spin_unlock(&trace_cmdline_lock);
2416 static void __trace_find_cmdline(int pid, char comm[])
2422 strcpy(comm, "<idle>");
2426 if (WARN_ON_ONCE(pid < 0)) {
2427 strcpy(comm, "<XXX>");
2431 tpid = pid & (PID_MAX_DEFAULT - 1);
2432 map = savedcmd->map_pid_to_cmdline[tpid];
2433 if (map != NO_CMDLINE_MAP) {
2434 tpid = savedcmd->map_cmdline_to_pid[map];
2436 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2440 strcpy(comm, "<...>");
2443 void trace_find_cmdline(int pid, char comm[])
2446 arch_spin_lock(&trace_cmdline_lock);
2448 __trace_find_cmdline(pid, comm);
2450 arch_spin_unlock(&trace_cmdline_lock);
2454 static int *trace_find_tgid_ptr(int pid)
2457 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2458 * if we observe a non-NULL tgid_map then we also observe the correct
2461 int *map = smp_load_acquire(&tgid_map);
2463 if (unlikely(!map || pid > tgid_map_max))
2469 int trace_find_tgid(int pid)
2471 int *ptr = trace_find_tgid_ptr(pid);
2473 return ptr ? *ptr : 0;
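/*
 * Illustrative use (not from the original file): resolve a recorded pid to
 * its thread group id, getting 0 back when nothing was recorded for that
 * pid; "entry" is a placeholder for whatever holds the pid being printed.
 *
 *	int tgid = trace_find_tgid(entry->pid);
 */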
2476 static int trace_save_tgid(struct task_struct *tsk)
2480 /* treat recording of idle task as a success */
2484 ptr = trace_find_tgid_ptr(tsk->pid);
2492 static bool tracing_record_taskinfo_skip(int flags)
2494 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2496 if (!__this_cpu_read(trace_taskinfo_save))
2502 * tracing_record_taskinfo - record the task info of a task
2504 * @task: task to record
2505 * @flags: TRACE_RECORD_CMDLINE for recording comm
2506 * TRACE_RECORD_TGID for recording tgid
2508 void tracing_record_taskinfo(struct task_struct *task, int flags)
2512 if (tracing_record_taskinfo_skip(flags))
2516 * Record as much task information as possible. If some fail, continue
2517 * to try to record the others.
2519 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2520 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2522 /* If recording any information failed, retry again soon. */
2526 __this_cpu_write(trace_taskinfo_save, false);
2530 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2532 * @prev: previous task during sched_switch
2533 * @next: next task during sched_switch
2534 * @flags: TRACE_RECORD_CMDLINE for recording comm
2535 * TRACE_RECORD_TGID for recording tgid
2537 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2538 struct task_struct *next, int flags)
2542 if (tracing_record_taskinfo_skip(flags))
2546 * Record as much task information as possible. If some fail, continue
2547 * to try to record the others.
2549 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2550 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2551 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2552 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2554 /* If recording any information failed, retry again soon. */
2558 __this_cpu_write(trace_taskinfo_save, false);
2561 /* Helpers to record a specific task information */
2562 void tracing_record_cmdline(struct task_struct *task)
2564 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2567 void tracing_record_tgid(struct task_struct *task)
2569 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2573 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2574 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2575 * simplifies those functions and keeps them in sync.
2577 enum print_line_t trace_handle_return(struct trace_seq *s)
2579 return trace_seq_has_overflowed(s) ?
2580 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2582 EXPORT_SYMBOL_GPL(trace_handle_return);
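/*
 * Illustrative sketch (the "foo" event and callback below are hypothetical):
 * a typical trace_event output callback builds its line with the
 * trace_seq_*() helpers and lets trace_handle_return() map a seq overflow
 * to the proper return value.
 *
 *	static enum print_line_t foo_output(struct trace_iterator *iter,
 *					    int flags, struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "foo event on CPU %d\n", iter->cpu);
 *		return trace_handle_return(s);
 *	}
 */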
2584 static unsigned short migration_disable_value(void)
2586 #if defined(CONFIG_SMP)
2587 return current->migration_disabled;
2593 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2595 unsigned int trace_flags = irqs_status;
2598 pc = preempt_count();
2601 trace_flags |= TRACE_FLAG_NMI;
2602 if (pc & HARDIRQ_MASK)
2603 trace_flags |= TRACE_FLAG_HARDIRQ;
2604 if (in_serving_softirq())
2605 trace_flags |= TRACE_FLAG_SOFTIRQ;
2606 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2607 trace_flags |= TRACE_FLAG_BH_OFF;
2609 if (tif_need_resched())
2610 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2611 if (test_preempt_need_resched())
2612 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2613 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2614 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
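/*
 * Reading aid (derived from the return expression above; not part of the
 * original file): the packed trace_ctx word is laid out roughly as
 *
 *	bits  0-3  : preempt_count(), clamped to 15
 *	bits  4-7  : migration_disable_value(), clamped to 15
 *	bits 16 up : the TRACE_FLAG_* state (irq/softirq/NMI/resched bits)
 *
 * For example, a hardirq context with preempt depth 1 and migrate-disable
 * depth 2 packs to (TRACE_FLAG_HARDIRQ << 16) | (2 << 4) | 1.
 */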
2617 struct ring_buffer_event *
2618 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2621 unsigned int trace_ctx)
2623 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2626 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2627 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2628 static int trace_buffered_event_ref;
2631 * trace_buffered_event_enable - enable buffering events
2633 * When events are being filtered, it is quicker to use a temporary
2634 * buffer to write the event data into if there's a likely chance
2635 * that it will not be committed. Discarding from the ring buffer
2636 * is not as fast as committing, and is much slower than copying to the temporary buffer.
2639 * When an event is to be filtered, allocate per-cpu buffers to
2640 * write the event data into; if the event is filtered and discarded
2641 * it is simply dropped, otherwise the entire data is committed to the ring buffer.
2644 void trace_buffered_event_enable(void)
2646 struct ring_buffer_event *event;
2650 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2652 if (trace_buffered_event_ref++)
2655 for_each_tracing_cpu(cpu) {
2656 page = alloc_pages_node(cpu_to_node(cpu),
2657 GFP_KERNEL | __GFP_NORETRY, 0);
2661 event = page_address(page);
2662 memset(event, 0, sizeof(*event));
2664 per_cpu(trace_buffered_event, cpu) = event;
2667 if (cpu == smp_processor_id() &&
2668 __this_cpu_read(trace_buffered_event) !=
2669 per_cpu(trace_buffered_event, cpu))
2676 trace_buffered_event_disable();
2679 static void enable_trace_buffered_event(void *data)
2681 /* Probably not needed, but do it anyway */
2683 this_cpu_dec(trace_buffered_event_cnt);
2686 static void disable_trace_buffered_event(void *data)
2688 this_cpu_inc(trace_buffered_event_cnt);
2692 * trace_buffered_event_disable - disable buffering events
2694 * When a filter is removed, it is faster to not use the buffered
2695 * events, and to commit directly into the ring buffer. Free up
2696 * the temp buffers when there are no more users. This requires
2697 * special synchronization with current events.
2699 void trace_buffered_event_disable(void)
2703 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2705 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2708 if (--trace_buffered_event_ref)
2712 /* For each CPU, set the buffer as used. */
2713 smp_call_function_many(tracing_buffer_mask,
2714 disable_trace_buffered_event, NULL, 1);
2717 /* Wait for all current users to finish */
2720 for_each_tracing_cpu(cpu) {
2721 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2722 per_cpu(trace_buffered_event, cpu) = NULL;
2725 * Make sure trace_buffered_event is NULL before clearing
2726 * trace_buffered_event_cnt.
2731 /* Do the work on each cpu */
2732 smp_call_function_many(tracing_buffer_mask,
2733 enable_trace_buffered_event, NULL, 1);
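/*
 * Minimal usage sketch (illustrative only): the enable/disable calls nest
 * via trace_buffered_event_ref and must be made under event_mutex,
 * typically bracketing the lifetime of an event filter:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... filter is installed and in use ...
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */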
2737 static struct trace_buffer *temp_buffer;
2739 struct ring_buffer_event *
2740 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2741 struct trace_event_file *trace_file,
2742 int type, unsigned long len,
2743 unsigned int trace_ctx)
2745 struct ring_buffer_event *entry;
2746 struct trace_array *tr = trace_file->tr;
2749 *current_rb = tr->array_buffer.buffer;
2751 if (!tr->no_filter_buffering_ref &&
2752 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2753 preempt_disable_notrace();
2755 * Filtering is on, so try to use the per cpu buffer first.
2756 * This buffer will simulate a ring_buffer_event,
2757 * where the type_len is zero and the array[0] will
2758 * hold the full length.
2759 * (see include/linux/ring_buffer.h for details on
2760 * how the ring_buffer_event is structured).
2762 * Using a temp buffer during filtering and copying it
2763 * on a matched filter is quicker than writing directly
2764 * into the ring buffer and then discarding it when
2765 * it doesn't match. That is because the discard
2766 * requires several atomic operations to get right.
2767 * Copying on match and doing nothing on a failed match
2768 * is still quicker than no copy on match, but having
2769 * to discard out of the ring buffer on a failed match.
2771 if ((entry = __this_cpu_read(trace_buffered_event))) {
2772 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2774 val = this_cpu_inc_return(trace_buffered_event_cnt);
2777 * Preemption is disabled, but interrupts and NMIs
2778 * can still come in now. If that happens after
2779 * the above increment, then it will have to go
2780 * back to the old method of allocating the event
2781 * on the ring buffer, and if the filter fails, it
2782 * will have to call ring_buffer_discard_commit()
2785 * Need to also check the unlikely case that the
2786 * length is bigger than the temp buffer size.
2787 * If that happens, then the reserve is pretty much
2788 * guaranteed to fail, as the ring buffer currently
2789 * only allows events less than a page. But that may
2790 * change in the future, so let the ring buffer reserve
2791 * handle the failure in that case.
2793 if (val == 1 && likely(len <= max_len)) {
2794 trace_event_setup(entry, type, trace_ctx);
2795 entry->array[0] = len;
2796 /* Return with preemption disabled */
2799 this_cpu_dec(trace_buffered_event_cnt);
2801 /* __trace_buffer_lock_reserve() disables preemption */
2802 preempt_enable_notrace();
2805 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2808 * If tracing is off, but we have triggers enabled
2809 * we still need to look at the event data. Use the temp_buffer
2810 to store the trace event for the trigger to use. It's recursion
2811 safe and will not be recorded anywhere.
2813 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2814 *current_rb = temp_buffer;
2815 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2820 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2822 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2823 static DEFINE_MUTEX(tracepoint_printk_mutex);
2825 static void output_printk(struct trace_event_buffer *fbuffer)
2827 struct trace_event_call *event_call;
2828 struct trace_event_file *file;
2829 struct trace_event *event;
2830 unsigned long flags;
2831 struct trace_iterator *iter = tracepoint_print_iter;
2833 /* We should never get here if iter is NULL */
2834 if (WARN_ON_ONCE(!iter))
2837 event_call = fbuffer->trace_file->event_call;
2838 if (!event_call || !event_call->event.funcs ||
2839 !event_call->event.funcs->trace)
2842 file = fbuffer->trace_file;
2843 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2844 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2845 !filter_match_preds(file->filter, fbuffer->entry)))
2848 event = &fbuffer->trace_file->event_call->event;
2850 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2851 trace_seq_init(&iter->seq);
2852 iter->ent = fbuffer->entry;
2853 event_call->event.funcs->trace(iter, 0, event);
2854 trace_seq_putc(&iter->seq, 0);
2855 printk("%s", iter->seq.buffer);
2857 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2860 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2861 void *buffer, size_t *lenp,
2864 int save_tracepoint_printk;
2867 mutex_lock(&tracepoint_printk_mutex);
2868 save_tracepoint_printk = tracepoint_printk;
2870 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2873 * This will force exiting early, as tracepoint_printk
2874 * is always zero when tracepoint_print_iter is not allocated.
2876 if (!tracepoint_print_iter)
2877 tracepoint_printk = 0;
2879 if (save_tracepoint_printk == tracepoint_printk)
2882 if (tracepoint_printk)
2883 static_key_enable(&tracepoint_printk_key.key);
2885 static_key_disable(&tracepoint_printk_key.key);
2888 mutex_unlock(&tracepoint_printk_mutex);
2893 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2895 enum event_trigger_type tt = ETT_NONE;
2896 struct trace_event_file *file = fbuffer->trace_file;
2898 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2899 fbuffer->entry, &tt))
2902 if (static_key_false(&tracepoint_printk_key.key))
2903 output_printk(fbuffer);
2905 if (static_branch_unlikely(&trace_event_exports_enabled))
2906 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2908 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2909 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2913 event_triggers_post_call(file, tt);
2916 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
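/*
 * Rough sketch of how the reserve/commit pair is used (simplified; the
 * real callers are generated by the TRACE_EVENT() macros, and both
 * "trace_file" and "struct trace_event_raw_foo" are placeholders here):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_foo *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->value = value;
 *	trace_event_buffer_commit(&fbuffer);
 */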
2921 * trace_buffer_unlock_commit_regs()
2922 * trace_event_buffer_commit()
2923 * trace_event_raw_event_xxx()
2925 # define STACK_SKIP 3
2927 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2928 struct trace_buffer *buffer,
2929 struct ring_buffer_event *event,
2930 unsigned int trace_ctx,
2931 struct pt_regs *regs)
2933 __buffer_unlock_commit(buffer, event);
2936 * If regs is not set, then skip the necessary functions.
2937 * Note, we can still get here via blktrace, wakeup tracer
2938 * and mmiotrace, but that's ok if they lose a function or
2939 * two. They are not that meaningful.
2941 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2942 ftrace_trace_userstack(tr, buffer, trace_ctx);
2946 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2949 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2950 struct ring_buffer_event *event)
2952 __buffer_unlock_commit(buffer, event);
2956 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2957 parent_ip, unsigned int trace_ctx)
2959 struct trace_event_call *call = &event_function;
2960 struct trace_buffer *buffer = tr->array_buffer.buffer;
2961 struct ring_buffer_event *event;
2962 struct ftrace_entry *entry;
2964 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2968 entry = ring_buffer_event_data(event);
2970 entry->parent_ip = parent_ip;
2972 if (!call_filter_check_discard(call, entry, buffer, event)) {
2973 if (static_branch_unlikely(&trace_function_exports_enabled))
2974 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2975 __buffer_unlock_commit(buffer, event);
2979 #ifdef CONFIG_STACKTRACE
2981 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2982 #define FTRACE_KSTACK_NESTING 4
2984 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2986 struct ftrace_stack {
2987 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2991 struct ftrace_stacks {
2992 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2995 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2996 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
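/*
 * Sizing note added as a reading aid (not part of the original file):
 * assuming 4 KiB pages and 8-byte longs, each nesting level holds
 * PAGE_SIZE / FTRACE_KSTACK_NESTING = 1024 return addresses (8 KiB), so
 * the per-CPU ftrace_stacks scratch area comes to roughly 32 KiB per CPU.
 */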
2998 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2999 unsigned int trace_ctx,
3000 int skip, struct pt_regs *regs)
3002 struct trace_event_call *call = &event_kernel_stack;
3003 struct ring_buffer_event *event;
3004 unsigned int size, nr_entries;
3005 struct ftrace_stack *fstack;
3006 struct stack_entry *entry;
3010 * Add one, for this function and the call to stack_trace_save().
3011 * If regs is set, then these functions will not be in the way.
3013 #ifndef CONFIG_UNWINDER_ORC
3018 preempt_disable_notrace();
3020 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3022 /* This should never happen. If it does, yell once and skip */
3023 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3027 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3028 * interrupt will either see the value pre increment or post
3029 * increment. If the interrupt happens pre increment it will have
3030 * restored the counter when it returns. We just need a barrier to
3031 * keep gcc from moving things around.
3035 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3036 size = ARRAY_SIZE(fstack->calls);
3039 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3042 nr_entries = stack_trace_save(fstack->calls, size, skip);
3045 size = nr_entries * sizeof(unsigned long);
3046 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3047 (sizeof(*entry) - sizeof(entry->caller)) + size,
3051 entry = ring_buffer_event_data(event);
3053 memcpy(&entry->caller, fstack->calls, size);
3054 entry->size = nr_entries;
3056 if (!call_filter_check_discard(call, entry, buffer, event))
3057 __buffer_unlock_commit(buffer, event);
3060 /* Again, don't let gcc optimize things here */
3062 __this_cpu_dec(ftrace_stack_reserve);
3063 preempt_enable_notrace();
3067 static inline void ftrace_trace_stack(struct trace_array *tr,
3068 struct trace_buffer *buffer,
3069 unsigned int trace_ctx,
3070 int skip, struct pt_regs *regs)
3072 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3075 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3078 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3081 struct trace_buffer *buffer = tr->array_buffer.buffer;
3083 if (rcu_is_watching()) {
3084 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3089 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3090 * but if the above rcu_is_watching() failed, then the NMI
3091 * triggered someplace critical, and rcu_irq_enter() should
3092 * not be called from NMI.
3094 if (unlikely(in_nmi()))
3097 rcu_irq_enter_irqson();
3098 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3099 rcu_irq_exit_irqson();
3103 * trace_dump_stack - record a stack back trace in the trace buffer
3104 * @skip: Number of functions to skip (helper handlers)
3106 void trace_dump_stack(int skip)
3108 if (tracing_disabled || tracing_selftest_running)
3111 #ifndef CONFIG_UNWINDER_ORC
3112 /* Skip 1 to skip this function. */
3115 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3116 tracing_gen_ctx(), skip, NULL);
3118 EXPORT_SYMBOL_GPL(trace_dump_stack);
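/*
 * Illustrative use (not from the original file): record the current kernel
 * stack into the global trace buffer without skipping any extra frames.
 * "unexpected_condition" is a placeholder for whatever situation the
 * caller wants a backtrace of.
 *
 *	if (unexpected_condition)
 *		trace_dump_stack(0);
 */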
3120 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3121 static DEFINE_PER_CPU(int, user_stack_count);
3124 ftrace_trace_userstack(struct trace_array *tr,
3125 struct trace_buffer *buffer, unsigned int trace_ctx)
3127 struct trace_event_call *call = &event_user_stack;
3128 struct ring_buffer_event *event;
3129 struct userstack_entry *entry;
3131 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3135 * NMIs can not handle page faults, even with fixups.
3136 * Saving the user stack can (and often does) fault.
3138 if (unlikely(in_nmi()))
3142 * prevent recursion, since the user stack tracing may
3143 * trigger other kernel events.
3146 if (__this_cpu_read(user_stack_count))
3149 __this_cpu_inc(user_stack_count);
3151 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3152 sizeof(*entry), trace_ctx);
3154 goto out_drop_count;
3155 entry = ring_buffer_event_data(event);
3157 entry->tgid = current->tgid;
3158 memset(&entry->caller, 0, sizeof(entry->caller));
3160 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3161 if (!call_filter_check_discard(call, entry, buffer, event))
3162 __buffer_unlock_commit(buffer, event);
3165 __this_cpu_dec(user_stack_count);
3169 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3170 static void ftrace_trace_userstack(struct trace_array *tr,
3171 struct trace_buffer *buffer,
3172 unsigned int trace_ctx)
3175 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3177 #endif /* CONFIG_STACKTRACE */
3180 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3181 unsigned long long delta)
3183 entry->bottom_delta_ts = delta & U32_MAX;
3184 entry->top_delta_ts = (delta >> 32);
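/*
 * Reading aid (derived from the split above, not part of the original
 * file): a consumer can rebuild the 64-bit delta from the two halves with
 *
 *	u64 delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
 */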
3187 void trace_last_func_repeats(struct trace_array *tr,
3188 struct trace_func_repeats *last_info,
3189 unsigned int trace_ctx)
3191 struct trace_buffer *buffer = tr->array_buffer.buffer;
3192 struct func_repeats_entry *entry;
3193 struct ring_buffer_event *event;
3196 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3197 sizeof(*entry), trace_ctx);
3201 delta = ring_buffer_event_time_stamp(buffer, event) -
3202 last_info->ts_last_call;
3204 entry = ring_buffer_event_data(event);
3205 entry->ip = last_info->ip;
3206 entry->parent_ip = last_info->parent_ip;
3207 entry->count = last_info->count;
3208 func_repeats_set_delta_ts(entry, delta);
3210 __buffer_unlock_commit(buffer, event);
3213 /* created for use with alloc_percpu */
3214 struct trace_buffer_struct {
3216 char buffer[4][TRACE_BUF_SIZE];
3219 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3222 * This allows for lockless recording. If we're nested too deeply, then
3223 * this returns NULL.
3225 static char *get_trace_buf(void)
3227 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3229 if (!trace_percpu_buffer || buffer->nesting >= 4)
3234 /* Interrupts must see nesting incremented before we use the buffer */
3236 return &buffer->buffer[buffer->nesting - 1][0];
3239 static void put_trace_buf(void)
3241 /* Don't let the decrement of nesting leak before this */
3243 this_cpu_dec(trace_percpu_buffer->nesting);
3246 static int alloc_percpu_trace_buffer(void)
3248 struct trace_buffer_struct __percpu *buffers;
3250 if (trace_percpu_buffer)
3253 buffers = alloc_percpu(struct trace_buffer_struct);
3254 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3257 trace_percpu_buffer = buffers;
3261 static int buffers_allocated;
3263 void trace_printk_init_buffers(void)
3265 if (buffers_allocated)
3268 if (alloc_percpu_trace_buffer())
3271 /* trace_printk() is for debug use only. Don't use it in production. */
3274 pr_warn("**********************************************************\n");
3275 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3277 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3279 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3280 pr_warn("** unsafe for production use. **\n");
3282 pr_warn("** If you see this message and you are not debugging **\n");
3283 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3285 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3286 pr_warn("**********************************************************\n");
3288 /* Expand the buffers to set size */
3289 tracing_update_buffers();
3291 buffers_allocated = 1;
3294 * trace_printk_init_buffers() can be called by modules.
3295 * If that happens, then we need to start cmdline recording
3296 * directly here. If the global_trace.buffer is already
3297 * allocated here, then this was called by module code.
3299 if (global_trace.array_buffer.buffer)
3300 tracing_start_cmdline_record();
3302 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3304 void trace_printk_start_comm(void)
3306 /* Start tracing comms if trace printk is set */
3307 if (!buffers_allocated)
3309 tracing_start_cmdline_record();
3312 static void trace_printk_start_stop_comm(int enabled)
3314 if (!buffers_allocated)
3318 tracing_start_cmdline_record();
3320 tracing_stop_cmdline_record();
3324 * trace_vbprintk - write binary msg to tracing buffer
3325 * @ip: The address of the caller
3326 * @fmt: The string format to write to the buffer
3327 * @args: Arguments for @fmt
3329 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3331 struct trace_event_call *call = &event_bprint;
3332 struct ring_buffer_event *event;
3333 struct trace_buffer *buffer;
3334 struct trace_array *tr = &global_trace;
3335 struct bprint_entry *entry;
3336 unsigned int trace_ctx;
3340 if (unlikely(tracing_selftest_running || tracing_disabled))
3343 /* Don't pollute graph traces with trace_vprintk internals */
3344 pause_graph_tracing();
3346 trace_ctx = tracing_gen_ctx();
3347 preempt_disable_notrace();
3349 tbuffer = get_trace_buf();
3355 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3357 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3360 size = sizeof(*entry) + sizeof(u32) * len;
3361 buffer = tr->array_buffer.buffer;
3362 ring_buffer_nest_start(buffer);
3363 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3367 entry = ring_buffer_event_data(event);
3371 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3372 if (!call_filter_check_discard(call, entry, buffer, event)) {
3373 __buffer_unlock_commit(buffer, event);
3374 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3378 ring_buffer_nest_end(buffer);
3383 preempt_enable_notrace();
3384 unpause_graph_tracing();
3388 EXPORT_SYMBOL_GPL(trace_vbprintk);
3392 __trace_array_vprintk(struct trace_buffer *buffer,
3393 unsigned long ip, const char *fmt, va_list args)
3395 struct trace_event_call *call = &event_print;
3396 struct ring_buffer_event *event;
3398 struct print_entry *entry;
3399 unsigned int trace_ctx;
3402 if (tracing_disabled || tracing_selftest_running)
3405 /* Don't pollute graph traces with trace_vprintk internals */
3406 pause_graph_tracing();
3408 trace_ctx = tracing_gen_ctx();
3409 preempt_disable_notrace();
3412 tbuffer = get_trace_buf();
3418 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3420 size = sizeof(*entry) + len + 1;
3421 ring_buffer_nest_start(buffer);
3422 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3426 entry = ring_buffer_event_data(event);
3429 memcpy(&entry->buf, tbuffer, len + 1);
3430 if (!call_filter_check_discard(call, entry, buffer, event)) {
3431 __buffer_unlock_commit(buffer, event);
3432 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3436 ring_buffer_nest_end(buffer);
3440 preempt_enable_notrace();
3441 unpause_graph_tracing();
3447 int trace_array_vprintk(struct trace_array *tr,
3448 unsigned long ip, const char *fmt, va_list args)
3450 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3454 * trace_array_printk - Print a message to a specific instance
3455 * @tr: The instance trace_array descriptor
3456 * @ip: The instruction pointer that this is called from.
3457 * @fmt: The format to print (printf format)
3459 * If a subsystem sets up its own instance, it may printk strings
3460 * into its tracing instance buffer using this function. Note, this
3461 * function will not write into the top level buffer (use
3462 * trace_printk() for that), as the top level buffer should only
3463 * contain events that can be individually disabled.
3464 * trace_printk() is meant only for debugging a kernel, and should
3465 * never be incorporated into normal use.
3467 * trace_array_printk() can be used, as it will not add noise to the
3468 * top level tracing buffer.
3470 * Note, trace_array_init_printk() must be called on @tr before this
3474 int trace_array_printk(struct trace_array *tr,
3475 unsigned long ip, const char *fmt, ...)
3483 /* This is only allowed for created instances */
3484 if (tr == &global_trace)
3487 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3491 ret = trace_array_vprintk(tr, ip, fmt, ap);
3495 EXPORT_SYMBOL_GPL(trace_array_printk);
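/*
 * Illustrative sketch (assumes an instance obtained elsewhere, e.g. via
 * trace_array_get_by_name(); the instance name "my_subsys" and the
 * variable "state" are made up for the example):
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "state changed to %d\n", state);
 */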
3498 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3499 * @tr: The trace array to initialize the buffers for
3501 * As trace_array_printk() only writes into instances, such calls are
3502 * OK to have in the kernel (unlike trace_printk()). This needs to be called
3503 * before trace_array_printk() can be used on a trace_array.
3505 int trace_array_init_printk(struct trace_array *tr)
3510 /* This is only allowed for created instances */
3511 if (tr == &global_trace)
3514 return alloc_percpu_trace_buffer();
3516 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3519 int trace_array_printk_buf(struct trace_buffer *buffer,
3520 unsigned long ip, const char *fmt, ...)
3525 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3529 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3535 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3537 return trace_array_vprintk(&global_trace, ip, fmt, args);
3539 EXPORT_SYMBOL_GPL(trace_vprintk);
3541 static void trace_iterator_increment(struct trace_iterator *iter)
3543 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3547 ring_buffer_iter_advance(buf_iter);
3550 static struct trace_entry *
3551 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3552 unsigned long *lost_events)
3554 struct ring_buffer_event *event;
3555 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3558 event = ring_buffer_iter_peek(buf_iter, ts);
3560 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3561 (unsigned long)-1 : 0;
3563 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3568 iter->ent_size = ring_buffer_event_length(event);
3569 return ring_buffer_event_data(event);
3575 static struct trace_entry *
3576 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3577 unsigned long *missing_events, u64 *ent_ts)
3579 struct trace_buffer *buffer = iter->array_buffer->buffer;
3580 struct trace_entry *ent, *next = NULL;
3581 unsigned long lost_events = 0, next_lost = 0;
3582 int cpu_file = iter->cpu_file;
3583 u64 next_ts = 0, ts;
3589 * If we are in a per_cpu trace file, don't bother iterating over
3590 * all CPUs; peek at that CPU directly.
3592 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3593 if (ring_buffer_empty_cpu(buffer, cpu_file))
3595 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3597 *ent_cpu = cpu_file;
3602 for_each_tracing_cpu(cpu) {
3604 if (ring_buffer_empty_cpu(buffer, cpu))
3607 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3610 * Pick the entry with the smallest timestamp:
3612 if (ent && (!next || ts < next_ts)) {
3616 next_lost = lost_events;
3617 next_size = iter->ent_size;
3621 iter->ent_size = next_size;
3624 *ent_cpu = next_cpu;
3630 *missing_events = next_lost;
3635 #define STATIC_FMT_BUF_SIZE 128
3636 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3638 static char *trace_iter_expand_format(struct trace_iterator *iter)
3643 * iter->tr is NULL when used with tp_printk, which makes
3644 * this get called where it is not safe to call krealloc().
3646 if (!iter->tr || iter->fmt == static_fmt_buf)
3649 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3652 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3659 /* Returns true if the string is safe to dereference from an event */
3660 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3662 unsigned long addr = (unsigned long)str;
3663 struct trace_event *trace_event;
3664 struct trace_event_call *event;
3666 /* OK if part of the event data */
3667 if ((addr >= (unsigned long)iter->ent) &&
3668 (addr < (unsigned long)iter->ent + iter->ent_size))
3671 /* OK if part of the temp seq buffer */
3672 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3673 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3676 /* Core rodata can not be freed */
3677 if (is_kernel_rodata(addr))
3680 if (trace_is_tracepoint_string(str))
3684 * Now this could be a module event, referencing core module
3685 * data, which is OK.
3690 trace_event = ftrace_find_event(iter->ent->type);
3694 event = container_of(trace_event, struct trace_event_call, event);
3695 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3698 /* Would rather have rodata, but this will suffice */
3699 if (within_module_core(addr, event->module))
3705 static const char *show_buffer(struct trace_seq *s)
3707 struct seq_buf *seq = &s->seq;
3709 seq_buf_terminate(seq);
3714 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3716 static int test_can_verify_check(const char *fmt, ...)
3723 * The verifier depends on vsnprintf() modifying the va_list passed
3724 * to it, which requires that the va_list be passed by reference. Some
3725 * architectures (like x86_32) pass it by value, which means that
3726 * vsnprintf() does not modify the caller's va_list, and the verifier
3727 * would then need to understand all the values that vsnprintf() can
3728 * consume. If the va_list is passed by value, the verifier is disabled.
3732 vsnprintf(buf, 16, "%d", ap);
3733 ret = va_arg(ap, int);
3739 static void test_can_verify(void)
3741 if (!test_can_verify_check("%d %d", 0, 1)) {
3742 pr_info("trace event string verifier disabled\n");
3743 static_branch_inc(&trace_no_verify);
3748 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3749 * @iter: The iterator that holds the seq buffer and the event being printed
3750 * @fmt: The format used to print the event
3751 * @ap: The va_list holding the data to print from @fmt.
3753 * This writes the data into the @iter->seq buffer using the data from
3754 * @fmt and @ap. If the format has a %s, then the source of the string
3755 * is examined to make sure it is safe to print, otherwise it will
3756 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3759 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3762 const char *p = fmt;
3766 if (WARN_ON_ONCE(!fmt))
3769 if (static_branch_unlikely(&trace_no_verify))
3772 /* Don't bother checking when doing a ftrace_dump() */
3773 if (iter->fmt == static_fmt_buf)
3782 /* We only care about %s and variants */
3783 for (i = 0; p[i]; i++) {
3784 if (i + 1 >= iter->fmt_size) {
3786 * If we can't expand the copy buffer,
3789 if (!trace_iter_expand_format(iter))
3793 if (p[i] == '\\' && p[i+1]) {
3798 /* Need to test cases like %08.*s */
3799 for (j = 1; p[i+j]; j++) {
3800 if (isdigit(p[i+j]) ||
3803 if (p[i+j] == '*') {
3815 /* If no %s found then just print normally */
3819 /* Copy up to the %s, and print that */
3820 strncpy(iter->fmt, p, i);
3821 iter->fmt[i] = '\0';
3822 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3825 * If iter->seq is full, the above call no longer guarantees
3826 * that ap is in sync with fmt processing, and further calls
3827 * to va_arg() can return wrong positional arguments.
3829 * Ensure that ap is no longer used in this case.
3831 if (iter->seq.full) {
3837 len = va_arg(ap, int);
3839 /* The ap now points to the string data of the %s */
3840 str = va_arg(ap, const char *);
3843 * If you hit this warning, it is likely that the
3844 * trace event in question used %s on a string that
3845 * was saved at the time of the event, but may not be
3846 * around when the trace is read. Use __string(),
3847 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3848 * instead. See samples/trace_events/trace-events-sample.h
3851 if (WARN_ONCE(!trace_safe_str(iter, str),
3852 "fmt: '%s' current_buffer: '%s'",
3853 fmt, show_buffer(&iter->seq))) {
3856 /* Try to safely read the string */
3858 if (len + 1 > iter->fmt_size)
3859 len = iter->fmt_size - 1;
3862 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3866 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3870 trace_seq_printf(&iter->seq, "(0x%px)", str);
3872 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3874 str = "[UNSAFE-MEMORY]";
3875 strcpy(iter->fmt, "%s");
3877 strncpy(iter->fmt, p + i, j + 1);
3878 iter->fmt[j+1] = '\0';
3881 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3883 trace_seq_printf(&iter->seq, iter->fmt, str);
3889 trace_seq_vprintf(&iter->seq, p, ap);
3892 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3894 const char *p, *new_fmt;
3897 if (WARN_ON_ONCE(!fmt))
3900 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3904 new_fmt = q = iter->fmt;
3906 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3907 if (!trace_iter_expand_format(iter))
3910 q += iter->fmt - new_fmt;
3911 new_fmt = iter->fmt;
3916 /* Replace %p with %px */
3920 } else if (p[0] == 'p' && !isalnum(p[1])) {
3931 #define STATIC_TEMP_BUF_SIZE 128
3932 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3934 /* Find the next real entry, without updating the iterator itself */
3935 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3936 int *ent_cpu, u64 *ent_ts)
3938 /* __find_next_entry will reset ent_size */
3939 int ent_size = iter->ent_size;
3940 struct trace_entry *entry;
3943 * If called from ftrace_dump(), then the iter->temp buffer
3944 * will be the static_temp_buf and not created from kmalloc.
3945 * If the entry size is greater than the buffer, we can
3946 * not save it. Just return NULL in that case. This is only
3947 * used to add markers when two consecutive events' time
3948 * stamps have a large delta. See trace_print_lat_context()
3950 if (iter->temp == static_temp_buf &&
3951 STATIC_TEMP_BUF_SIZE < ent_size)
3955 * The __find_next_entry() may call peek_next_entry(), which may
3956 * call ring_buffer_peek() that may make the contents of iter->ent
3957 * undefined. Need to copy iter->ent now.
3959 if (iter->ent && iter->ent != iter->temp) {
3960 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3961 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3963 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3968 iter->temp_size = iter->ent_size;
3970 memcpy(iter->temp, iter->ent, iter->ent_size);
3971 iter->ent = iter->temp;
3973 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3974 /* Put back the original ent_size */
3975 iter->ent_size = ent_size;
3980 /* Find the next real entry, and increment the iterator to the next entry */
3981 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3983 iter->ent = __find_next_entry(iter, &iter->cpu,
3984 &iter->lost_events, &iter->ts);
3987 trace_iterator_increment(iter);
3989 return iter->ent ? iter : NULL;
3992 static void trace_consume(struct trace_iterator *iter)
3994 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3995 &iter->lost_events);
3998 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4000 struct trace_iterator *iter = m->private;
4004 WARN_ON_ONCE(iter->leftover);
4008 /* can't go backwards */
4013 ent = trace_find_next_entry_inc(iter);
4017 while (ent && iter->idx < i)
4018 ent = trace_find_next_entry_inc(iter);
4025 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4027 struct ring_buffer_iter *buf_iter;
4028 unsigned long entries = 0;
4031 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4033 buf_iter = trace_buffer_iter(iter, cpu);
4037 ring_buffer_iter_reset(buf_iter);
4040 * We could have the case with the max latency tracers
4041 * that a reset never took place on a cpu. This is evident
4042 * by the timestamp being before the start of the buffer.
4044 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4045 if (ts >= iter->array_buffer->time_start)
4048 ring_buffer_iter_advance(buf_iter);
4051 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4055 * The current tracer is copied to avoid using a global lock all around.
4058 static void *s_start(struct seq_file *m, loff_t *pos)
4060 struct trace_iterator *iter = m->private;
4061 struct trace_array *tr = iter->tr;
4062 int cpu_file = iter->cpu_file;
4068 * copy the tracer to avoid using a global lock all around.
4069 * iter->trace is a copy of current_trace, the pointer to the
4070 * name may be used instead of a strcmp(), as iter->trace->name
4071 * will point to the same string as current_trace->name.
4073 mutex_lock(&trace_types_lock);
4074 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4075 *iter->trace = *tr->current_trace;
4076 mutex_unlock(&trace_types_lock);
4078 #ifdef CONFIG_TRACER_MAX_TRACE
4079 if (iter->snapshot && iter->trace->use_max_tr)
4080 return ERR_PTR(-EBUSY);
4083 if (*pos != iter->pos) {
4088 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4089 for_each_tracing_cpu(cpu)
4090 tracing_iter_reset(iter, cpu);
4092 tracing_iter_reset(iter, cpu_file);
4095 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4100 * If we overflowed the seq_file before, then we want
4101 * to just reuse the trace_seq buffer again.
4107 p = s_next(m, p, &l);
4111 trace_event_read_lock();
4112 trace_access_lock(cpu_file);
4116 static void s_stop(struct seq_file *m, void *p)
4118 struct trace_iterator *iter = m->private;
4120 #ifdef CONFIG_TRACER_MAX_TRACE
4121 if (iter->snapshot && iter->trace->use_max_tr)
4125 trace_access_unlock(iter->cpu_file);
4126 trace_event_read_unlock();
4130 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4131 unsigned long *entries, int cpu)
4133 unsigned long count;
4135 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4137 * If this buffer has skipped entries, then we hold all
4138 * entries for the trace and we need to ignore the
4139 * ones before the time stamp.
4141 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4142 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4143 /* total is the same as the entries */
4147 ring_buffer_overrun_cpu(buf->buffer, cpu);
4152 get_total_entries(struct array_buffer *buf,
4153 unsigned long *total, unsigned long *entries)
4161 for_each_tracing_cpu(cpu) {
4162 get_total_entries_cpu(buf, &t, &e, cpu);
4168 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4170 unsigned long total, entries;
4175 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4180 unsigned long trace_total_entries(struct trace_array *tr)
4182 unsigned long total, entries;
4187 get_total_entries(&tr->array_buffer, &total, &entries);
4192 static void print_lat_help_header(struct seq_file *m)
4194 seq_puts(m, "# _------=> CPU# \n"
4195 "# / _-----=> irqs-off/BH-disabled\n"
4196 "# | / _----=> need-resched \n"
4197 "# || / _---=> hardirq/softirq \n"
4198 "# ||| / _--=> preempt-depth \n"
4199 "# |||| / _-=> migrate-disable \n"
4200 "# ||||| / delay \n"
4201 "# cmd pid |||||| time | caller \n"
4202 "# \\ / |||||| \\ | / \n");
4205 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4207 unsigned long total;
4208 unsigned long entries;
4210 get_total_entries(buf, &total, &entries);
4211 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4212 entries, total, num_online_cpus());
4216 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4219 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4221 print_event_info(buf, m);
4223 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4224 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4227 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4230 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4231 const char *space = " ";
4232 int prec = tgid ? 12 : 2;
4234 print_event_info(buf, m);
4236 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4237 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4238 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4239 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4240 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4241 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4242 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4243 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4247 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4249 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4250 struct array_buffer *buf = iter->array_buffer;
4251 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4252 struct tracer *type = iter->trace;
4253 unsigned long entries;
4254 unsigned long total;
4255 const char *name = "preemption";
4259 get_total_entries(buf, &total, &entries);
4261 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4263 seq_puts(m, "# -----------------------------------"
4264 "---------------------------------\n");
4265 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4266 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4267 nsecs_to_usecs(data->saved_latency),
4271 #if defined(CONFIG_PREEMPT_NONE)
4273 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4275 #elif defined(CONFIG_PREEMPT)
4277 #elif defined(CONFIG_PREEMPT_RT)
4282 /* These are reserved for later use */
4285 seq_printf(m, " #P:%d)\n", num_online_cpus());
4289 seq_puts(m, "# -----------------\n");
4290 seq_printf(m, "# | task: %.16s-%d "
4291 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4292 data->comm, data->pid,
4293 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4294 data->policy, data->rt_priority);
4295 seq_puts(m, "# -----------------\n");
4297 if (data->critical_start) {
4298 seq_puts(m, "# => started at: ");
4299 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4300 trace_print_seq(m, &iter->seq);
4301 seq_puts(m, "\n# => ended at: ");
4302 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4303 trace_print_seq(m, &iter->seq);
4304 seq_puts(m, "\n#\n");
4310 static void test_cpu_buff_start(struct trace_iterator *iter)
4312 struct trace_seq *s = &iter->seq;
4313 struct trace_array *tr = iter->tr;
4315 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4318 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4321 if (cpumask_available(iter->started) &&
4322 cpumask_test_cpu(iter->cpu, iter->started))
4325 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4328 if (cpumask_available(iter->started))
4329 cpumask_set_cpu(iter->cpu, iter->started);
4331 /* Don't print started cpu buffer for the first entry of the trace */
4333 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4337 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4339 struct trace_array *tr = iter->tr;
4340 struct trace_seq *s = &iter->seq;
4341 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4342 struct trace_entry *entry;
4343 struct trace_event *event;
4347 test_cpu_buff_start(iter);
4349 event = ftrace_find_event(entry->type);
4351 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4352 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4353 trace_print_lat_context(iter);
4355 trace_print_context(iter);
4358 if (trace_seq_has_overflowed(s))
4359 return TRACE_TYPE_PARTIAL_LINE;
4362 return event->funcs->trace(iter, sym_flags, event);
4364 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4366 return trace_handle_return(s);
4369 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4371 struct trace_array *tr = iter->tr;
4372 struct trace_seq *s = &iter->seq;
4373 struct trace_entry *entry;
4374 struct trace_event *event;
4378 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4379 trace_seq_printf(s, "%d %d %llu ",
4380 entry->pid, iter->cpu, iter->ts);
4382 if (trace_seq_has_overflowed(s))
4383 return TRACE_TYPE_PARTIAL_LINE;
4385 event = ftrace_find_event(entry->type);
4387 return event->funcs->raw(iter, 0, event);
4389 trace_seq_printf(s, "%d ?\n", entry->type);
4391 return trace_handle_return(s);
4394 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4396 struct trace_array *tr = iter->tr;
4397 struct trace_seq *s = &iter->seq;
4398 unsigned char newline = '\n';
4399 struct trace_entry *entry;
4400 struct trace_event *event;
4404 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4405 SEQ_PUT_HEX_FIELD(s, entry->pid);
4406 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4407 SEQ_PUT_HEX_FIELD(s, iter->ts);
4408 if (trace_seq_has_overflowed(s))
4409 return TRACE_TYPE_PARTIAL_LINE;
4412 event = ftrace_find_event(entry->type);
4414 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4415 if (ret != TRACE_TYPE_HANDLED)
4419 SEQ_PUT_FIELD(s, newline);
4421 return trace_handle_return(s);
4424 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4426 struct trace_array *tr = iter->tr;
4427 struct trace_seq *s = &iter->seq;
4428 struct trace_entry *entry;
4429 struct trace_event *event;
4433 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4434 SEQ_PUT_FIELD(s, entry->pid);
4435 SEQ_PUT_FIELD(s, iter->cpu);
4436 SEQ_PUT_FIELD(s, iter->ts);
4437 if (trace_seq_has_overflowed(s))
4438 return TRACE_TYPE_PARTIAL_LINE;
4441 event = ftrace_find_event(entry->type);
4442 return event ? event->funcs->binary(iter, 0, event) :
4446 int trace_empty(struct trace_iterator *iter)
4448 struct ring_buffer_iter *buf_iter;
4451 /* If we are looking at one CPU buffer, only check that one */
4452 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4453 cpu = iter->cpu_file;
4454 buf_iter = trace_buffer_iter(iter, cpu);
4456 if (!ring_buffer_iter_empty(buf_iter))
4459 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4465 for_each_tracing_cpu(cpu) {
4466 buf_iter = trace_buffer_iter(iter, cpu);
4468 if (!ring_buffer_iter_empty(buf_iter))
4471 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4479 /* Called with trace_event_read_lock() held. */
4480 enum print_line_t print_trace_line(struct trace_iterator *iter)
4482 struct trace_array *tr = iter->tr;
4483 unsigned long trace_flags = tr->trace_flags;
4484 enum print_line_t ret;
4486 if (iter->lost_events) {
4487 if (iter->lost_events == (unsigned long)-1)
4488 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4491 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4492 iter->cpu, iter->lost_events);
4493 if (trace_seq_has_overflowed(&iter->seq))
4494 return TRACE_TYPE_PARTIAL_LINE;
4497 if (iter->trace && iter->trace->print_line) {
4498 ret = iter->trace->print_line(iter);
4499 if (ret != TRACE_TYPE_UNHANDLED)
4503 if (iter->ent->type == TRACE_BPUTS &&
4504 trace_flags & TRACE_ITER_PRINTK &&
4505 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4506 return trace_print_bputs_msg_only(iter);
4508 if (iter->ent->type == TRACE_BPRINT &&
4509 trace_flags & TRACE_ITER_PRINTK &&
4510 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4511 return trace_print_bprintk_msg_only(iter);
4513 if (iter->ent->type == TRACE_PRINT &&
4514 trace_flags & TRACE_ITER_PRINTK &&
4515 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4516 return trace_print_printk_msg_only(iter);
4518 if (trace_flags & TRACE_ITER_BIN)
4519 return print_bin_fmt(iter);
4521 if (trace_flags & TRACE_ITER_HEX)
4522 return print_hex_fmt(iter);
4524 if (trace_flags & TRACE_ITER_RAW)
4525 return print_raw_fmt(iter);
4527 return print_trace_fmt(iter);
4530 void trace_latency_header(struct seq_file *m)
4532 struct trace_iterator *iter = m->private;
4533 struct trace_array *tr = iter->tr;
4535 /* print nothing if the buffers are empty */
4536 if (trace_empty(iter))
4539 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4540 print_trace_header(m, iter);
4542 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4543 print_lat_help_header(m);
4546 void trace_default_header(struct seq_file *m)
4548 struct trace_iterator *iter = m->private;
4549 struct trace_array *tr = iter->tr;
4550 unsigned long trace_flags = tr->trace_flags;
4552 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4555 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4556 /* print nothing if the buffers are empty */
4557 if (trace_empty(iter))
4559 print_trace_header(m, iter);
4560 if (!(trace_flags & TRACE_ITER_VERBOSE))
4561 print_lat_help_header(m);
4563 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4564 if (trace_flags & TRACE_ITER_IRQ_INFO)
4565 print_func_help_header_irq(iter->array_buffer,
4568 print_func_help_header(iter->array_buffer, m,
4574 static void test_ftrace_alive(struct seq_file *m)
4576 if (!ftrace_is_dead())
4578 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4579 "# MAY BE MISSING FUNCTION EVENTS\n");
4582 #ifdef CONFIG_TRACER_MAX_TRACE
4583 static void show_snapshot_main_help(struct seq_file *m)
4585 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4586 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4587 "# Takes a snapshot of the main buffer.\n"
4588 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4589 "# (Doesn't have to be '2' works with any number that\n"
4590 "# is not a '0' or '1')\n");
4593 static void show_snapshot_percpu_help(struct seq_file *m)
4595 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4596 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4597 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4598 "# Takes a snapshot of the main buffer for this cpu.\n");
4600 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4601 "# Must use main snapshot file to allocate.\n");
4603 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4604 "# (Doesn't have to be '2' works with any number that\n"
4605 "# is not a '0' or '1')\n");
4608 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4610 if (iter->tr->allocated_snapshot)
4611 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4613 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4615 seq_puts(m, "# Snapshot commands:\n");
4616 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4617 show_snapshot_main_help(m);
4619 show_snapshot_percpu_help(m);
4622 /* Should never be called */
4623 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4626 static int s_show(struct seq_file *m, void *v)
4628 struct trace_iterator *iter = v;
4631 if (iter->ent == NULL) {
4633 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4635 test_ftrace_alive(m);
4637 if (iter->snapshot && trace_empty(iter))
4638 print_snapshot_help(m, iter);
4639 else if (iter->trace && iter->trace->print_header)
4640 iter->trace->print_header(m);
4642 trace_default_header(m);
4644 } else if (iter->leftover) {
4646 * If we filled the seq_file buffer earlier, we
4647 * want to just show it now.
4649 ret = trace_print_seq(m, &iter->seq);
4651 /* ret should this time be zero, but you never know */
4652 iter->leftover = ret;
4655 print_trace_line(iter);
4656 ret = trace_print_seq(m, &iter->seq);
4658 * If we overflow the seq_file buffer, then it will
4659 * ask us for this data again at start up.
4661 * ret is 0 if seq_file write succeeded.
4664 iter->leftover = ret;
4671 * Should be used after trace_array_get(), trace_types_lock
4672 * ensures that i_cdev was already initialized.
4674 static inline int tracing_get_cpu(struct inode *inode)
4676 if (inode->i_cdev) /* See trace_create_cpu_file() */
4677 return (long)inode->i_cdev - 1;
4678 return RING_BUFFER_ALL_CPUS;
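/*
 * Note added as a reading aid (not part of the original file): the per-CPU
 * file creation path stores "cpu + 1" in i_cdev, so a NULL i_cdev (value 0)
 * naturally decodes to RING_BUFFER_ALL_CPUS here, and per-CPU files decode
 * back to their CPU number via the "- 1" above.
 */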
4681 static const struct seq_operations tracer_seq_ops = {
4688 static struct trace_iterator *
4689 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4691 struct trace_array *tr = inode->i_private;
4692 struct trace_iterator *iter;
4695 if (tracing_disabled)
4696 return ERR_PTR(-ENODEV);
4698 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4700 return ERR_PTR(-ENOMEM);
4702 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4704 if (!iter->buffer_iter)
4708 * trace_find_next_entry() may need to save off iter->ent.
4709 * It will place it into the iter->temp buffer. As most
4710 * events are less than 128 bytes, allocate a buffer of that size.
4711 * If one is greater, then trace_find_next_entry() will
4712 * allocate a new buffer to adjust for the bigger iter->ent.
4713 * It's not critical if it fails to get allocated here.
4715 iter->temp = kmalloc(128, GFP_KERNEL);
4717 iter->temp_size = 128;
4720 * trace_event_printf() may need to modify the given format
4721 * string to replace %p with %px so that it shows the real address
4722 * instead of a hashed value. However, that is only needed for
4723 * event tracing; other tracers may not need it. Defer the
4724 * allocation until it is needed.
4730 * We make a copy of the current tracer to avoid concurrent
4731 * changes on it while we are reading.
4733 mutex_lock(&trace_types_lock);
4734 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4738 *iter->trace = *tr->current_trace;
4740 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4745 #ifdef CONFIG_TRACER_MAX_TRACE
4746 /* Currently only the top directory has a snapshot */
4747 if (tr->current_trace->print_max || snapshot)
4748 iter->array_buffer = &tr->max_buffer;
4751 iter->array_buffer = &tr->array_buffer;
4752 iter->snapshot = snapshot;
4754 iter->cpu_file = tracing_get_cpu(inode);
4755 mutex_init(&iter->mutex);
4757 /* Notify the tracer early; before we stop tracing. */
4758 if (iter->trace->open)
4759 iter->trace->open(iter);
4761 /* Annotate start of buffers if we had overruns */
4762 if (ring_buffer_overruns(iter->array_buffer->buffer))
4763 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4765 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4766 if (trace_clocks[tr->clock_id].in_ns)
4767 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4770 * If pause-on-trace is enabled, then stop the trace while
4771 * dumping, unless this is the "snapshot" file
4773 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4774 tracing_stop_tr(tr);
4776 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4777 for_each_tracing_cpu(cpu) {
4778 iter->buffer_iter[cpu] =
4779 ring_buffer_read_prepare(iter->array_buffer->buffer,
4782 ring_buffer_read_prepare_sync();
4783 for_each_tracing_cpu(cpu) {
4784 ring_buffer_read_start(iter->buffer_iter[cpu]);
4785 tracing_iter_reset(iter, cpu);
4788 cpu = iter->cpu_file;
4789 iter->buffer_iter[cpu] =
4790 ring_buffer_read_prepare(iter->array_buffer->buffer,
4792 ring_buffer_read_prepare_sync();
4793 ring_buffer_read_start(iter->buffer_iter[cpu]);
4794 tracing_iter_reset(iter, cpu);
4797 mutex_unlock(&trace_types_lock);
4802 mutex_unlock(&trace_types_lock);
4805 kfree(iter->buffer_iter);
4807 seq_release_private(inode, file);
4808 return ERR_PTR(-ENOMEM);
4811 int tracing_open_generic(struct inode *inode, struct file *filp)
4815 ret = tracing_check_open_get_tr(NULL);
4819 filp->private_data = inode->i_private;
4823 bool tracing_is_disabled(void)
4825 return (tracing_disabled) ? true : false;
4829 * Open and update trace_array ref count.
4830 * Must have the current trace_array passed to it.
4832 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4834 struct trace_array *tr = inode->i_private;
4837 ret = tracing_check_open_get_tr(tr);
4841 filp->private_data = inode->i_private;
4846 static int tracing_mark_open(struct inode *inode, struct file *filp)
4848 stream_open(inode, filp);
4849 return tracing_open_generic_tr(inode, filp);
4852 static int tracing_release(struct inode *inode, struct file *file)
4854 struct trace_array *tr = inode->i_private;
4855 struct seq_file *m = file->private_data;
4856 struct trace_iterator *iter;
4859 if (!(file->f_mode & FMODE_READ)) {
4860 trace_array_put(tr);
4864 /* Writes do not use seq_file */
4866 mutex_lock(&trace_types_lock);
4868 for_each_tracing_cpu(cpu) {
4869 if (iter->buffer_iter[cpu])
4870 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4873 if (iter->trace && iter->trace->close)
4874 iter->trace->close(iter);
4876 if (!iter->snapshot && tr->stop_count)
4877 /* reenable tracing if it was previously enabled */
4878 tracing_start_tr(tr);
4880 __trace_array_put(tr);
4882 mutex_unlock(&trace_types_lock);
4884 mutex_destroy(&iter->mutex);
4885 free_cpumask_var(iter->started);
4889 kfree(iter->buffer_iter);
4890 seq_release_private(inode, file);
4895 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4897 struct trace_array *tr = inode->i_private;
4899 trace_array_put(tr);
4903 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4905 struct trace_array *tr = inode->i_private;
4907 trace_array_put(tr);
4909 return single_release(inode, file);
4912 static int tracing_open(struct inode *inode, struct file *file)
4914 struct trace_array *tr = inode->i_private;
4915 struct trace_iterator *iter;
4918 ret = tracing_check_open_get_tr(tr);
4922 /* If this file was open for write, then erase contents */
4923 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4924 int cpu = tracing_get_cpu(inode);
4925 struct array_buffer *trace_buf = &tr->array_buffer;
4927 #ifdef CONFIG_TRACER_MAX_TRACE
4928 if (tr->current_trace->print_max)
4929 trace_buf = &tr->max_buffer;
4932 if (cpu == RING_BUFFER_ALL_CPUS)
4933 tracing_reset_online_cpus(trace_buf);
4935 tracing_reset_cpu(trace_buf, cpu);
4938 if (file->f_mode & FMODE_READ) {
4939 iter = __tracing_open(inode, file, false);
4941 ret = PTR_ERR(iter);
4942 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4943 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4947 trace_array_put(tr);
4953 * Some tracers are not suitable for instance buffers.
4954 * A tracer is always available for the global array (toplevel)
4955 * or if it explicitly states that it is.
4958 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4960 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4963 /* Find the next tracer that this trace array may use */
4964 static struct tracer *
4965 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4967 while (t && !trace_ok_for_array(t, tr))
4974 t_next(struct seq_file *m, void *v, loff_t *pos)
4976 struct trace_array *tr = m->private;
4977 struct tracer *t = v;
4982 t = get_tracer_for_array(tr, t->next);
4987 static void *t_start(struct seq_file *m, loff_t *pos)
4989 struct trace_array *tr = m->private;
4993 mutex_lock(&trace_types_lock);
4995 t = get_tracer_for_array(tr, trace_types);
4996 for (; t && l < *pos; t = t_next(m, t, &l))
5002 static void t_stop(struct seq_file *m, void *p)
5004 mutex_unlock(&trace_types_lock);
5007 static int t_show(struct seq_file *m, void *v)
5009 struct tracer *t = v;
5014 seq_puts(m, t->name);
5023 static const struct seq_operations show_traces_seq_ops = {
5030 static int show_traces_open(struct inode *inode, struct file *file)
5032 struct trace_array *tr = inode->i_private;
5036 ret = tracing_check_open_get_tr(tr);
5040 ret = seq_open(file, &show_traces_seq_ops);
5042 trace_array_put(tr);
5046 m = file->private_data;
5052 static int show_traces_release(struct inode *inode, struct file *file)
5054 struct trace_array *tr = inode->i_private;
5056 trace_array_put(tr);
5057 return seq_release(inode, file);
5061 tracing_write_stub(struct file *filp, const char __user *ubuf,
5062 size_t count, loff_t *ppos)
5067 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5071 if (file->f_mode & FMODE_READ)
5072 ret = seq_lseek(file, offset, whence);
5074 file->f_pos = ret = 0;
5079 static const struct file_operations tracing_fops = {
5080 .open = tracing_open,
5082 .write = tracing_write_stub,
5083 .llseek = tracing_lseek,
5084 .release = tracing_release,
5087 static const struct file_operations show_traces_fops = {
5088 .open = show_traces_open,
5090 .llseek = seq_lseek,
5091 .release = show_traces_release,
5095 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5096 size_t count, loff_t *ppos)
5098 struct trace_array *tr = file_inode(filp)->i_private;
5102 len = snprintf(NULL, 0, "%*pb\n",
5103 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5104 mask_str = kmalloc(len, GFP_KERNEL);
5108 len = snprintf(mask_str, len, "%*pb\n",
5109 cpumask_pr_args(tr->tracing_cpumask));
5114 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
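/*
 * tracing_set_cpumask() below applies a new CPU mask to @tr; it backs
 * writes to the "tracing_cpumask" file, which takes a hex mask, e.g.
 * "echo 3 > tracing_cpumask" limits tracing to CPUs 0 and 1.
 */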
5122 int tracing_set_cpumask(struct trace_array *tr,
5123 cpumask_var_t tracing_cpumask_new)
5130 local_irq_disable();
5131 arch_spin_lock(&tr->max_lock);
5132 for_each_tracing_cpu(cpu) {
5134 * Increase/decrease the disabled counter if we are
5135 * about to flip a bit in the cpumask:
5137 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5138 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5139 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5140 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5142 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5143 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5144 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5145 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5148 arch_spin_unlock(&tr->max_lock);
5151 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5157 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5158 size_t count, loff_t *ppos)
5160 struct trace_array *tr = file_inode(filp)->i_private;
5161 cpumask_var_t tracing_cpumask_new;
5164 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5167 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5171 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5175 free_cpumask_var(tracing_cpumask_new);
5180 free_cpumask_var(tracing_cpumask_new);
5185 static const struct file_operations tracing_cpumask_fops = {
5186 .open = tracing_open_generic_tr,
5187 .read = tracing_cpumask_read,
5188 .write = tracing_cpumask_write,
5189 .release = tracing_release_generic_tr,
5190 .llseek = generic_file_llseek,
5193 static int tracing_trace_options_show(struct seq_file *m, void *v)
5195 struct tracer_opt *trace_opts;
5196 struct trace_array *tr = m->private;
5200 mutex_lock(&trace_types_lock);
5201 tracer_flags = tr->current_trace->flags->val;
5202 trace_opts = tr->current_trace->flags->opts;
5204 for (i = 0; trace_options[i]; i++) {
5205 if (tr->trace_flags & (1 << i))
5206 seq_printf(m, "%s\n", trace_options[i]);
5208 seq_printf(m, "no%s\n", trace_options[i]);
5211 for (i = 0; trace_opts[i].name; i++) {
5212 if (tracer_flags & trace_opts[i].bit)
5213 seq_printf(m, "%s\n", trace_opts[i].name);
5215 seq_printf(m, "no%s\n", trace_opts[i].name);
5217 mutex_unlock(&trace_types_lock);
5222 static int __set_tracer_option(struct trace_array *tr,
5223 struct tracer_flags *tracer_flags,
5224 struct tracer_opt *opts, int neg)
5226 struct tracer *trace = tracer_flags->trace;
5229 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5234 tracer_flags->val &= ~opts->bit;
5236 tracer_flags->val |= opts->bit;
5240 /* Try to assign a tracer specific option */
5241 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5243 struct tracer *trace = tr->current_trace;
5244 struct tracer_flags *tracer_flags = trace->flags;
5245 struct tracer_opt *opts = NULL;
5248 for (i = 0; tracer_flags->opts[i].name; i++) {
5249 opts = &tracer_flags->opts[i];
5251 if (strcmp(cmp, opts->name) == 0)
5252 return __set_tracer_option(tr, trace->flags, opts, neg);
5258 /* Some tracers require overwrite to stay enabled */
5259 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5261 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5267 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5271 if ((mask == TRACE_ITER_RECORD_TGID) ||
5272 (mask == TRACE_ITER_RECORD_CMD))
5273 lockdep_assert_held(&event_mutex);
5275 /* do nothing if flag is already set */
5276 if (!!(tr->trace_flags & mask) == !!enabled)
5279 /* Give the tracer a chance to approve the change */
5280 if (tr->current_trace->flag_changed)
5281 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5285 tr->trace_flags |= mask;
5287 tr->trace_flags &= ~mask;
5289 if (mask == TRACE_ITER_RECORD_CMD)
5290 trace_event_enable_cmd_record(enabled);
5292 if (mask == TRACE_ITER_RECORD_TGID) {
5294 tgid_map_max = pid_max;
5295 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5299 * Pairs with smp_load_acquire() in
5300 * trace_find_tgid_ptr() to ensure that if it observes
5301 * the tgid_map we just allocated then it also observes
5302 * the corresponding tgid_map_max value.
5304 smp_store_release(&tgid_map, map);
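/*
 * For reference, the reader side in trace_find_tgid_ptr() is roughly:
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (!map || pid > tgid_map_max)
 *		return NULL;
 *	return &map[pid];
 */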
5307 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5311 trace_event_enable_tgid_record(enabled);
5314 if (mask == TRACE_ITER_EVENT_FORK)
5315 trace_event_follow_fork(tr, enabled);
5317 if (mask == TRACE_ITER_FUNC_FORK)
5318 ftrace_pid_follow_fork(tr, enabled);
5320 if (mask == TRACE_ITER_OVERWRITE) {
5321 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5322 #ifdef CONFIG_TRACER_MAX_TRACE
5323 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5327 if (mask == TRACE_ITER_PRINTK) {
5328 trace_printk_start_stop_comm(enabled);
5329 trace_printk_control(enabled);
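/*
 * trace_set_options() below applies one option token, optionally prefixed
 * with "no" to clear it (e.g. "irq-info" / "noirq-info"). It serves both
 * the trace_options file and apply_trace_boot_options() for the
 * trace_options= boot parameter.
 */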
5335 int trace_set_options(struct trace_array *tr, char *option)
5340 size_t orig_len = strlen(option);
5343 cmp = strstrip(option);
5345 len = str_has_prefix(cmp, "no");
5351 mutex_lock(&event_mutex);
5352 mutex_lock(&trace_types_lock);
5354 ret = match_string(trace_options, -1, cmp);
5355 /* If no option could be set, test the specific tracer options */
5357 ret = set_tracer_option(tr, cmp, neg);
5359 ret = set_tracer_flag(tr, 1 << ret, !neg);
5361 mutex_unlock(&trace_types_lock);
5362 mutex_unlock(&event_mutex);
5365 * If the first trailing whitespace is replaced with '\0' by strstrip,
5366 * turn it back into a space.
5368 if (orig_len > strlen(option))
5369 option[strlen(option)] = ' ';
5374 static void __init apply_trace_boot_options(void)
5376 char *buf = trace_boot_options_buf;
5380 option = strsep(&buf, ",");
5386 trace_set_options(&global_trace, option);
5388 /* Put back the comma to allow this to be called again */
5395 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5396 size_t cnt, loff_t *ppos)
5398 struct seq_file *m = filp->private_data;
5399 struct trace_array *tr = m->private;
5403 if (cnt >= sizeof(buf))
5406 if (copy_from_user(buf, ubuf, cnt))
5411 ret = trace_set_options(tr, buf);
5420 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5422 struct trace_array *tr = inode->i_private;
5425 ret = tracing_check_open_get_tr(tr);
5429 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5431 trace_array_put(tr);
5436 static const struct file_operations tracing_iter_fops = {
5437 .open = tracing_trace_options_open,
5439 .llseek = seq_lseek,
5440 .release = tracing_single_release_tr,
5441 .write = tracing_trace_options_write,
5444 static const char readme_msg[] =
5445 "tracing mini-HOWTO:\n\n"
5446 "# echo 0 > tracing_on : quick way to disable tracing\n"
5447 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5448 " Important files:\n"
5449 " trace\t\t\t- The static contents of the buffer\n"
5450 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5451 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5452 " current_tracer\t- function and latency tracers\n"
5453 " available_tracers\t- list of configured tracers for current_tracer\n"
5454 " error_log\t- error log for failed commands (that support it)\n"
5455 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5456 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5457 " trace_clock\t\t-change the clock used to order events\n"
5458 " local: Per cpu clock but may not be synced across CPUs\n"
5459 " global: Synced across CPUs but slows tracing down.\n"
5460 " counter: Not a clock, but just an increment\n"
5461 " uptime: Jiffy counter from time of boot\n"
5462 " perf: Same clock that perf events use\n"
5463 #ifdef CONFIG_X86_64
5464 " x86-tsc: TSC cycle counter\n"
5466 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5467 " delta: Delta difference against a buffer-wide timestamp\n"
5468 " absolute: Absolute (standalone) timestamp\n"
5469 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5470 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5471 " tracing_cpumask\t- Limit which CPUs to trace\n"
5472 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5473 "\t\t\t Remove sub-buffer with rmdir\n"
5474 " trace_options\t\t- Set format or modify how tracing happens\n"
5475 "\t\t\t Disable an option by prefixing 'no' to the\n"
5476 "\t\t\t option name\n"
5477 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5478 #ifdef CONFIG_DYNAMIC_FTRACE
5479 "\n available_filter_functions - list of functions that can be filtered on\n"
5480 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5481 "\t\t\t functions\n"
5482 "\t accepts: func_full_name or glob-matching-pattern\n"
5483 "\t modules: Can select a group via module\n"
5484 "\t Format: :mod:<module-name>\n"
5485 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5486 "\t triggers: a command to perform when function is hit\n"
5487 "\t Format: <function>:<trigger>[:count]\n"
5488 "\t trigger: traceon, traceoff\n"
5489 "\t\t enable_event:<system>:<event>\n"
5490 "\t\t disable_event:<system>:<event>\n"
5491 #ifdef CONFIG_STACKTRACE
5494 #ifdef CONFIG_TRACER_SNAPSHOT
5499 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5500 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5501 "\t The first one will disable tracing every time do_fault is hit\n"
5502 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5503 "\t The first time do trap is hit and it disables tracing, the\n"
5504 "\t counter will decrement to 2. If tracing is already disabled,\n"
5505 "\t the counter will not decrement. It only decrements when the\n"
5506 "\t trigger did work\n"
5507 "\t To remove trigger without count:\n"
5508 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5509 "\t To remove trigger with a count:\n"
5510 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5511 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5512 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5513 "\t modules: Can select a group via module command :mod:\n"
5514 "\t Does not accept triggers\n"
5515 #endif /* CONFIG_DYNAMIC_FTRACE */
5516 #ifdef CONFIG_FUNCTION_TRACER
5517 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5519 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5522 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5523 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5524 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5525 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5527 #ifdef CONFIG_TRACER_SNAPSHOT
5528 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5529 "\t\t\t snapshot buffer. Read the contents for more\n"
5530 "\t\t\t information\n"
5532 #ifdef CONFIG_STACK_TRACER
5533 " stack_trace\t\t- Shows the max stack trace when active\n"
5534 " stack_max_size\t- Shows current max stack size that was traced\n"
5535 "\t\t\t Write into this file to reset the max size (trigger a\n"
5536 "\t\t\t new trace)\n"
5537 #ifdef CONFIG_DYNAMIC_FTRACE
5538 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5541 #endif /* CONFIG_STACK_TRACER */
5542 #ifdef CONFIG_DYNAMIC_EVENTS
5543 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5544 "\t\t\t Write into this file to define/undefine new trace events.\n"
5546 #ifdef CONFIG_KPROBE_EVENTS
5547 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5548 "\t\t\t Write into this file to define/undefine new trace events.\n"
5550 #ifdef CONFIG_UPROBE_EVENTS
5551 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5552 "\t\t\t Write into this file to define/undefine new trace events.\n"
5554 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5555 "\t accepts: event-definitions (one definition per line)\n"
5556 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5557 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5558 #ifdef CONFIG_HIST_TRIGGERS
5559 "\t s:[synthetic/]<event> <field> [<field>]\n"
5561 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5562 "\t -:[<group>/]<event>\n"
5563 #ifdef CONFIG_KPROBE_EVENTS
5564 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5565 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5567 #ifdef CONFIG_UPROBE_EVENTS
5568 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5570 "\t args: <name>=fetcharg[:type]\n"
5571 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5572 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5573 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5575 "\t $stack<index>, $stack, $retval, $comm,\n"
5577 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5578 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5579 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5580 "\t <type>\\[<array-size>\\]\n"
5581 #ifdef CONFIG_HIST_TRIGGERS
5582 "\t field: <stype> <name>;\n"
5583 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5584 "\t [unsigned] char/int/long\n"
5586 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5587 "\t of the <attached-group>/<attached-event>.\n"
5589 " events/\t\t- Directory containing all trace event subsystems:\n"
5590 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5591 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5592 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5594 " filter\t\t- If set, only events passing filter are traced\n"
5595 " events/<system>/<event>/\t- Directory containing control files for\n"
5597 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5598 " filter\t\t- If set, only events passing filter are traced\n"
5599 " trigger\t\t- If set, a command to perform when event is hit\n"
5600 "\t Format: <trigger>[:count][if <filter>]\n"
5601 "\t trigger: traceon, traceoff\n"
5602 "\t enable_event:<system>:<event>\n"
5603 "\t disable_event:<system>:<event>\n"
5604 #ifdef CONFIG_HIST_TRIGGERS
5605 "\t enable_hist:<system>:<event>\n"
5606 "\t disable_hist:<system>:<event>\n"
5608 #ifdef CONFIG_STACKTRACE
5611 #ifdef CONFIG_TRACER_SNAPSHOT
5614 #ifdef CONFIG_HIST_TRIGGERS
5615 "\t\t hist (see below)\n"
5617 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5618 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5619 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5620 "\t events/block/block_unplug/trigger\n"
5621 "\t The first disables tracing every time block_unplug is hit.\n"
5622 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5623 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5624 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5625 "\t Like function triggers, the counter is only decremented if it\n"
5626 "\t enabled or disabled tracing.\n"
5627 "\t To remove a trigger without a count:\n"
5628 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5629 "\t To remove a trigger with a count:\n"
5630 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5631 "\t Filters can be ignored when removing a trigger.\n"
5632 #ifdef CONFIG_HIST_TRIGGERS
5633 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5634 "\t Format: hist:keys=<field1[,field2,...]>\n"
5635 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5636 "\t [:values=<field1[,field2,...]>]\n"
5637 "\t [:sort=<field1[,field2,...]>]\n"
5638 "\t [:size=#entries]\n"
5639 "\t [:pause][:continue][:clear]\n"
5640 "\t [:name=histname1]\n"
5641 "\t [:<handler>.<action>]\n"
5642 "\t [if <filter>]\n\n"
5643 "\t Note, special fields can be used as well:\n"
5644 "\t common_timestamp - to record current timestamp\n"
5645 "\t common_cpu - to record the CPU the event happened on\n"
5647 "\t A hist trigger variable can be:\n"
5648 "\t - a reference to a field e.g. x=current_timestamp,\n"
5649 "\t - a reference to another variable e.g. y=$x,\n"
5650 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5651 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5653 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5654 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5655 "\t variable reference, field or numeric literal.\n"
5657 "\t When a matching event is hit, an entry is added to a hash\n"
5658 "\t table using the key(s) and value(s) named, and the value of a\n"
5659 "\t sum called 'hitcount' is incremented. Keys and values\n"
5660 "\t correspond to fields in the event's format description. Keys\n"
5661 "\t can be any field, or the special string 'stacktrace'.\n"
5662 "\t Compound keys consisting of up to two fields can be specified\n"
5663 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5664 "\t fields. Sort keys consisting of up to two fields can be\n"
5665 "\t specified using the 'sort' keyword. The sort direction can\n"
5666 "\t be modified by appending '.descending' or '.ascending' to a\n"
5667 "\t sort field. The 'size' parameter can be used to specify more\n"
5668 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5669 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5670 "\t its histogram data will be shared with other triggers of the\n"
5671 "\t same name, and trigger hits will update this common data.\n\n"
5672 "\t Reading the 'hist' file for the event will dump the hash\n"
5673 "\t table in its entirety to stdout. If there are multiple hist\n"
5674 "\t triggers attached to an event, there will be a table for each\n"
5675 "\t trigger in the output. The table displayed for a named\n"
5676 "\t trigger will be the same as any other instance having the\n"
5677 "\t same name. The default format used to display a given field\n"
5678 "\t can be modified by appending any of the following modifiers\n"
5679 "\t to the field name, as applicable:\n\n"
5680 "\t .hex display a number as a hex value\n"
5681 "\t .sym display an address as a symbol\n"
5682 "\t .sym-offset display an address as a symbol and offset\n"
5683 "\t .execname display a common_pid as a program name\n"
5684 "\t .syscall display a syscall id as a syscall name\n"
5685 "\t .log2 display log2 value rather than raw number\n"
5686 "\t .buckets=size display values in groups of size rather than raw number\n"
5687 "\t .usecs display a common_timestamp in microseconds\n\n"
5688 "\t The 'pause' parameter can be used to pause an existing hist\n"
5689 "\t trigger or to start a hist trigger but not log any events\n"
5690 "\t until told to do so. 'continue' can be used to start or\n"
5691 "\t restart a paused hist trigger.\n\n"
5692 "\t The 'clear' parameter will clear the contents of a running\n"
5693 "\t hist trigger and leave its current paused/active state\n"
5695 "\t The enable_hist and disable_hist triggers can be used to\n"
5696 "\t have one event conditionally start and stop another event's\n"
5697 "\t already-attached hist trigger. The syntax is analogous to\n"
5698 "\t the enable_event and disable_event triggers.\n\n"
5699 "\t Hist trigger handlers and actions are executed whenever a\n"
5700 "\t a histogram entry is added or updated. They take the form:\n\n"
5701 "\t <handler>.<action>\n\n"
5702 "\t The available handlers are:\n\n"
5703 "\t onmatch(matching.event) - invoke on addition or update\n"
5704 "\t onmax(var) - invoke if var exceeds current max\n"
5705 "\t onchange(var) - invoke action if var changes\n\n"
5706 "\t The available actions are:\n\n"
5707 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5708 "\t save(field,...) - save current event fields\n"
5709 #ifdef CONFIG_TRACER_SNAPSHOT
5710 "\t snapshot() - snapshot the trace buffer\n\n"
5712 #ifdef CONFIG_SYNTH_EVENTS
5713 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5714 "\t Write into this file to define/undefine new synthetic events.\n"
5715 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5721 tracing_readme_read(struct file *filp, char __user *ubuf,
5722 size_t cnt, loff_t *ppos)
5724 return simple_read_from_buffer(ubuf, cnt, ppos,
5725 readme_msg, strlen(readme_msg));
5728 static const struct file_operations tracing_readme_fops = {
5729 .open = tracing_open_generic,
5730 .read = tracing_readme_read,
5731 .llseek = generic_file_llseek,
5734 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5738 return trace_find_tgid_ptr(pid);
5741 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5745 return trace_find_tgid_ptr(pid);
5748 static void saved_tgids_stop(struct seq_file *m, void *v)
5752 static int saved_tgids_show(struct seq_file *m, void *v)
5754 int *entry = (int *)v;
5755 int pid = entry - tgid_map;
5761 seq_printf(m, "%d %d\n", pid, tgid);
5765 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5766 .start = saved_tgids_start,
5767 .stop = saved_tgids_stop,
5768 .next = saved_tgids_next,
5769 .show = saved_tgids_show,
5772 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5776 ret = tracing_check_open_get_tr(NULL);
5780 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5784 static const struct file_operations tracing_saved_tgids_fops = {
5785 .open = tracing_saved_tgids_open,
5787 .llseek = seq_lseek,
5788 .release = seq_release,
5791 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5793 unsigned int *ptr = v;
5795 if (*pos || m->count)
5800 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5802 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5811 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5817 arch_spin_lock(&trace_cmdline_lock);
5819 v = &savedcmd->map_cmdline_to_pid[0];
5821 v = saved_cmdlines_next(m, v, &l);
5829 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5831 arch_spin_unlock(&trace_cmdline_lock);
5835 static int saved_cmdlines_show(struct seq_file *m, void *v)
5837 char buf[TASK_COMM_LEN];
5838 unsigned int *pid = v;
5840 __trace_find_cmdline(*pid, buf);
5841 seq_printf(m, "%d %s\n", *pid, buf);
5845 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5846 .start = saved_cmdlines_start,
5847 .next = saved_cmdlines_next,
5848 .stop = saved_cmdlines_stop,
5849 .show = saved_cmdlines_show,
5852 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5856 ret = tracing_check_open_get_tr(NULL);
5860 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5863 static const struct file_operations tracing_saved_cmdlines_fops = {
5864 .open = tracing_saved_cmdlines_open,
5866 .llseek = seq_lseek,
5867 .release = seq_release,
5871 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5872 size_t cnt, loff_t *ppos)
5877 arch_spin_lock(&trace_cmdline_lock);
5878 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5879 arch_spin_unlock(&trace_cmdline_lock);
5881 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5884 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5886 kfree(s->saved_cmdlines);
5887 kfree(s->map_cmdline_to_pid);
5891 static int tracing_resize_saved_cmdlines(unsigned int val)
5893 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5895 s = kmalloc(sizeof(*s), GFP_KERNEL);
5899 if (allocate_cmdlines_buffer(val, s) < 0) {
5904 arch_spin_lock(&trace_cmdline_lock);
5905 savedcmd_temp = savedcmd;
5907 arch_spin_unlock(&trace_cmdline_lock);
5908 free_saved_cmdlines_buffer(savedcmd_temp);
5914 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5915 size_t cnt, loff_t *ppos)
5920 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5924 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5925 if (!val || val > PID_MAX_DEFAULT)
5928 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5937 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5938 .open = tracing_open_generic,
5939 .read = tracing_saved_cmdlines_size_read,
5940 .write = tracing_saved_cmdlines_size_write,
5943 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5944 static union trace_eval_map_item *
5945 update_eval_map(union trace_eval_map_item *ptr)
5947 if (!ptr->map.eval_string) {
5948 if (ptr->tail.next) {
5949 ptr = ptr->tail.next;
5950 /* Set ptr to the next real item (skip head) */
5958 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5960 union trace_eval_map_item *ptr = v;
5963 * Paranoid! If ptr points to end, we don't want to increment past it.
5964 * This really should never happen.
5967 ptr = update_eval_map(ptr);
5968 if (WARN_ON_ONCE(!ptr))
5972 ptr = update_eval_map(ptr);
5977 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5979 union trace_eval_map_item *v;
5982 mutex_lock(&trace_eval_mutex);
5984 v = trace_eval_maps;
5988 while (v && l < *pos) {
5989 v = eval_map_next(m, v, &l);
5995 static void eval_map_stop(struct seq_file *m, void *v)
5997 mutex_unlock(&trace_eval_mutex);
6000 static int eval_map_show(struct seq_file *m, void *v)
6002 union trace_eval_map_item *ptr = v;
6004 seq_printf(m, "%s %ld (%s)\n",
6005 ptr->map.eval_string, ptr->map.eval_value,
6011 static const struct seq_operations tracing_eval_map_seq_ops = {
6012 .start = eval_map_start,
6013 .next = eval_map_next,
6014 .stop = eval_map_stop,
6015 .show = eval_map_show,
6018 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6022 ret = tracing_check_open_get_tr(NULL);
6026 return seq_open(filp, &tracing_eval_map_seq_ops);
6029 static const struct file_operations tracing_eval_map_fops = {
6030 .open = tracing_eval_map_open,
6032 .llseek = seq_lseek,
6033 .release = seq_release,
6036 static inline union trace_eval_map_item *
6037 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6039 /* Return tail of array given the head */
6040 return ptr + ptr->head.length + 1;
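/*
 * Layout of one chunk allocated by trace_insert_eval_map_file() below
 * (len + 2 union items):
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * trace_eval_jmp_to_tail() skips from the head to the tail item so that
 * chunks can be chained together through tail.next.
 */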
6044 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6047 struct trace_eval_map **stop;
6048 struct trace_eval_map **map;
6049 union trace_eval_map_item *map_array;
6050 union trace_eval_map_item *ptr;
6055 * The trace_eval_maps contains the map plus a head and tail item,
6056 * where the head holds the module and length of array, and the
6057 * tail holds a pointer to the next list.
6059 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6061 pr_warn("Unable to allocate trace eval mapping\n");
6065 mutex_lock(&trace_eval_mutex);
6067 if (!trace_eval_maps)
6068 trace_eval_maps = map_array;
6070 ptr = trace_eval_maps;
6072 ptr = trace_eval_jmp_to_tail(ptr);
6073 if (!ptr->tail.next)
6075 ptr = ptr->tail.next;
6078 ptr->tail.next = map_array;
6080 map_array->head.mod = mod;
6081 map_array->head.length = len;
6084 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6085 map_array->map = **map;
6088 memset(map_array, 0, sizeof(*map_array));
6090 mutex_unlock(&trace_eval_mutex);
6093 static void trace_create_eval_file(struct dentry *d_tracer)
6095 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6096 NULL, &tracing_eval_map_fops);
6099 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6100 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6101 static inline void trace_insert_eval_map_file(struct module *mod,
6102 struct trace_eval_map **start, int len) { }
6103 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6105 static void trace_insert_eval_map(struct module *mod,
6106 struct trace_eval_map **start, int len)
6108 struct trace_eval_map **map;
6115 trace_event_eval_update(map, len);
6117 trace_insert_eval_map_file(mod, start, len);
6121 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6122 size_t cnt, loff_t *ppos)
6124 struct trace_array *tr = filp->private_data;
6125 char buf[MAX_TRACER_SIZE+2];
6128 mutex_lock(&trace_types_lock);
6129 r = sprintf(buf, "%s\n", tr->current_trace->name);
6130 mutex_unlock(&trace_types_lock);
6132 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6135 int tracer_init(struct tracer *t, struct trace_array *tr)
6137 tracing_reset_online_cpus(&tr->array_buffer);
6141 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6145 for_each_tracing_cpu(cpu)
6146 per_cpu_ptr(buf->data, cpu)->entries = val;
6149 #ifdef CONFIG_TRACER_MAX_TRACE
6150 /* resize @trace_buf to the per-CPU entry counts of @size_buf */
6151 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6152 struct array_buffer *size_buf, int cpu_id)
6156 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6157 for_each_tracing_cpu(cpu) {
6158 ret = ring_buffer_resize(trace_buf->buffer,
6159 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6162 per_cpu_ptr(trace_buf->data, cpu)->entries =
6163 per_cpu_ptr(size_buf->data, cpu)->entries;
6166 ret = ring_buffer_resize(trace_buf->buffer,
6167 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6169 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6170 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6175 #endif /* CONFIG_TRACER_MAX_TRACE */
6177 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6178 unsigned long size, int cpu)
6183 * If kernel or user changes the size of the ring buffer
6184 * we use the size that was given, and we can forget about
6185 * expanding it later.
6187 ring_buffer_expanded = true;
6189 /* May be called before buffers are initialized */
6190 if (!tr->array_buffer.buffer)
6193 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6197 #ifdef CONFIG_TRACER_MAX_TRACE
6198 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6199 !tr->current_trace->use_max_tr)
6202 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6204 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6205 &tr->array_buffer, cpu);
6208 * AARGH! We are left with different
6209 * size max buffer!!!!
6210 * The max buffer is our "snapshot" buffer.
6211 * When a tracer needs a snapshot (one of the
6212 * latency tracers), it swaps the max buffer
6213 * with the saved snapshot. We succeeded in updating
6214 * the size of the main buffer, but failed to
6215 * update the size of the max buffer. But when we tried
6216 * to reset the main buffer to the original size, we
6217 * failed there too. This is very unlikely to
6218 * happen, but if it does, warn and kill all
6222 tracing_disabled = 1;
6227 if (cpu == RING_BUFFER_ALL_CPUS)
6228 set_buffer_entries(&tr->max_buffer, size);
6230 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6233 #endif /* CONFIG_TRACER_MAX_TRACE */
6235 if (cpu == RING_BUFFER_ALL_CPUS)
6236 set_buffer_entries(&tr->array_buffer, size);
6238 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6243 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6244 unsigned long size, int cpu_id)
6248 mutex_lock(&trace_types_lock);
6250 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6251 /* make sure this cpu is enabled in the mask */
6252 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6258 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6263 mutex_unlock(&trace_types_lock);
6270 * tracing_update_buffers - used by tracing facility to expand ring buffers
6272 * To save memory on systems where tracing is configured in but never
6273 * used, the ring buffers are set to a minimum size. Once a user starts
6274 * to use the tracing facility, they need to grow to their default
6275 * size.
6277 * This function is to be called when a tracer is about to be used.
6279 int tracing_update_buffers(void)
6283 mutex_lock(&trace_types_lock);
6284 if (!ring_buffer_expanded)
6285 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6286 RING_BUFFER_ALL_CPUS);
6287 mutex_unlock(&trace_types_lock);
6292 struct trace_option_dentry;
6295 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6298 * Used to clear out the tracer before deletion of an instance.
6299 * Must have trace_types_lock held.
6301 static void tracing_set_nop(struct trace_array *tr)
6303 if (tr->current_trace == &nop_trace)
6306 tr->current_trace->enabled--;
6308 if (tr->current_trace->reset)
6309 tr->current_trace->reset(tr);
6311 tr->current_trace = &nop_trace;
6314 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6316 /* Only enable if the directory has been created already. */
6320 create_trace_option_files(tr, t);
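/*
 * tracing_set_tracer() switches @tr's current tracer to the one named
 * @buf; it is the backend for writes to the "current_tracer" file, e.g.
 * "echo function_graph > current_tracer".
 */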
6323 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6326 #ifdef CONFIG_TRACER_MAX_TRACE
6331 mutex_lock(&trace_types_lock);
6333 if (!ring_buffer_expanded) {
6334 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6335 RING_BUFFER_ALL_CPUS);
6341 for (t = trace_types; t; t = t->next) {
6342 if (strcmp(t->name, buf) == 0)
6349 if (t == tr->current_trace)
6352 #ifdef CONFIG_TRACER_SNAPSHOT
6353 if (t->use_max_tr) {
6354 arch_spin_lock(&tr->max_lock);
6355 if (tr->cond_snapshot)
6357 arch_spin_unlock(&tr->max_lock);
6362 /* Some tracers won't work on kernel command line */
6363 if (system_state < SYSTEM_RUNNING && t->noboot) {
6364 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6369 /* Some tracers are only allowed for the top level buffer */
6370 if (!trace_ok_for_array(t, tr)) {
6375 /* If trace pipe files are being read, we can't change the tracer */
6376 if (tr->trace_ref) {
6381 trace_branch_disable();
6383 tr->current_trace->enabled--;
6385 if (tr->current_trace->reset)
6386 tr->current_trace->reset(tr);
6388 /* Current trace needs to be nop_trace before synchronize_rcu */
6389 tr->current_trace = &nop_trace;
6391 #ifdef CONFIG_TRACER_MAX_TRACE
6392 had_max_tr = tr->allocated_snapshot;
6394 if (had_max_tr && !t->use_max_tr) {
6396 * We need to make sure that the update_max_tr sees that
6397 * current_trace changed to nop_trace to keep it from
6398 * swapping the buffers after we resize it.
6399 * The update_max_tr is called with interrupts disabled,
6400 * so a synchronize_rcu() is sufficient.
6407 #ifdef CONFIG_TRACER_MAX_TRACE
6408 if (t->use_max_tr && !had_max_tr) {
6409 ret = tracing_alloc_snapshot_instance(tr);
6416 ret = tracer_init(t, tr);
6421 tr->current_trace = t;
6422 tr->current_trace->enabled++;
6423 trace_branch_enable(tr);
6425 mutex_unlock(&trace_types_lock);
6431 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6432 size_t cnt, loff_t *ppos)
6434 struct trace_array *tr = filp->private_data;
6435 char buf[MAX_TRACER_SIZE+1];
6442 if (cnt > MAX_TRACER_SIZE)
6443 cnt = MAX_TRACER_SIZE;
6445 if (copy_from_user(buf, ubuf, cnt))
6450 /* strip trailing whitespace. */
6451 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6454 err = tracing_set_tracer(tr, buf);
6464 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6465 size_t cnt, loff_t *ppos)
6470 r = snprintf(buf, sizeof(buf), "%ld\n",
6471 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6472 if (r > sizeof(buf))
6474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6478 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6479 size_t cnt, loff_t *ppos)
6484 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6494 tracing_thresh_read(struct file *filp, char __user *ubuf,
6495 size_t cnt, loff_t *ppos)
6497 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6501 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6502 size_t cnt, loff_t *ppos)
6504 struct trace_array *tr = filp->private_data;
6507 mutex_lock(&trace_types_lock);
6508 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6512 if (tr->current_trace->update_thresh) {
6513 ret = tr->current_trace->update_thresh(tr);
6520 mutex_unlock(&trace_types_lock);
6525 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6528 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6529 size_t cnt, loff_t *ppos)
6531 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6535 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6536 size_t cnt, loff_t *ppos)
6538 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6543 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6545 struct trace_array *tr = inode->i_private;
6546 struct trace_iterator *iter;
6549 ret = tracing_check_open_get_tr(tr);
6553 mutex_lock(&trace_types_lock);
6555 /* create a buffer to store the information to pass to userspace */
6556 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6559 __trace_array_put(tr);
6563 trace_seq_init(&iter->seq);
6564 iter->trace = tr->current_trace;
6566 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6571 /* trace pipe does not show start of buffer */
6572 cpumask_setall(iter->started);
6574 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6575 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6577 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6578 if (trace_clocks[tr->clock_id].in_ns)
6579 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6582 iter->array_buffer = &tr->array_buffer;
6583 iter->cpu_file = tracing_get_cpu(inode);
6584 mutex_init(&iter->mutex);
6585 filp->private_data = iter;
6587 if (iter->trace->pipe_open)
6588 iter->trace->pipe_open(iter);
6590 nonseekable_open(inode, filp);
6594 mutex_unlock(&trace_types_lock);
6599 __trace_array_put(tr);
6600 mutex_unlock(&trace_types_lock);
6604 static int tracing_release_pipe(struct inode *inode, struct file *file)
6606 struct trace_iterator *iter = file->private_data;
6607 struct trace_array *tr = inode->i_private;
6609 mutex_lock(&trace_types_lock);
6613 if (iter->trace->pipe_close)
6614 iter->trace->pipe_close(iter);
6616 mutex_unlock(&trace_types_lock);
6618 free_cpumask_var(iter->started);
6619 mutex_destroy(&iter->mutex);
6622 trace_array_put(tr);
6628 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6630 struct trace_array *tr = iter->tr;
6632 /* Iterators are static, they should be filled or empty */
6633 if (trace_buffer_iter(iter, iter->cpu_file))
6634 return EPOLLIN | EPOLLRDNORM;
6636 if (tr->trace_flags & TRACE_ITER_BLOCK)
6638 * Always select as readable when in blocking mode
6640 return EPOLLIN | EPOLLRDNORM;
6642 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6647 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6649 struct trace_iterator *iter = filp->private_data;
6651 return trace_poll(iter, filp, poll_table);
6654 /* Must be called with iter->mutex held. */
6655 static int tracing_wait_pipe(struct file *filp)
6657 struct trace_iterator *iter = filp->private_data;
6660 while (trace_empty(iter)) {
6662 if ((filp->f_flags & O_NONBLOCK)) {
6667 * We only return EOF once we have read something and tracing
6668 * has been disabled. If tracing is disabled but nothing has been
6669 * read yet, we keep blocking. This allows a user to cat this file,
6670 * and then enable tracing. But after we have read something,
6671 * we give an EOF when tracing is again disabled.
6673 * iter->pos will be 0 if we haven't read anything.
6675 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6678 mutex_unlock(&iter->mutex);
6680 ret = wait_on_pipe(iter, 0);
6682 mutex_lock(&iter->mutex);
6695 tracing_read_pipe(struct file *filp, char __user *ubuf,
6696 size_t cnt, loff_t *ppos)
6698 struct trace_iterator *iter = filp->private_data;
6702 * Avoid more than one consumer on a single file descriptor.
6703 * This is just a matter of trace coherency; the ring buffer itself
6706 mutex_lock(&iter->mutex);
6708 /* return any leftover data */
6709 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6713 trace_seq_init(&iter->seq);
6715 if (iter->trace->read) {
6716 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6722 sret = tracing_wait_pipe(filp);
6726 /* stop when tracing is finished */
6727 if (trace_empty(iter)) {
6732 if (cnt >= PAGE_SIZE)
6733 cnt = PAGE_SIZE - 1;
6735 /* reset all but tr, trace, and overruns */
6736 trace_iterator_reset(iter);
6737 cpumask_clear(iter->started);
6738 trace_seq_init(&iter->seq);
6740 trace_event_read_lock();
6741 trace_access_lock(iter->cpu_file);
6742 while (trace_find_next_entry_inc(iter) != NULL) {
6743 enum print_line_t ret;
6744 int save_len = iter->seq.seq.len;
6746 ret = print_trace_line(iter);
6747 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6748 /* don't print partial lines */
6749 iter->seq.seq.len = save_len;
6752 if (ret != TRACE_TYPE_NO_CONSUME)
6753 trace_consume(iter);
6755 if (trace_seq_used(&iter->seq) >= cnt)
6759 * Setting the full flag means we reached the trace_seq buffer
6760 * size and should have left via the partial output condition
6761 * above. One of the trace_seq_* functions is not used properly.
6763 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6766 trace_access_unlock(iter->cpu_file);
6767 trace_event_read_unlock();
6769 /* Now copy what we have to the user */
6770 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6771 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6772 trace_seq_init(&iter->seq);
6775 * If there was nothing to send to user, in spite of consuming trace
6776 * entries, go back to wait for more entries.
6782 mutex_unlock(&iter->mutex);
6787 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6790 __free_page(spd->pages[idx]);
6794 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6800 /* Seq buffer is page-sized, exactly what we need. */
6802 save_len = iter->seq.seq.len;
6803 ret = print_trace_line(iter);
6805 if (trace_seq_has_overflowed(&iter->seq)) {
6806 iter->seq.seq.len = save_len;
6811 * This should not be hit, because it should only
6812 * be set if the iter->seq overflowed. But check it
6813 * anyway to be safe.
6815 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6816 iter->seq.seq.len = save_len;
6820 count = trace_seq_used(&iter->seq) - save_len;
6823 iter->seq.seq.len = save_len;
6827 if (ret != TRACE_TYPE_NO_CONSUME)
6828 trace_consume(iter);
6830 if (!trace_find_next_entry_inc(iter)) {
6840 static ssize_t tracing_splice_read_pipe(struct file *filp,
6842 struct pipe_inode_info *pipe,
6846 struct page *pages_def[PIPE_DEF_BUFFERS];
6847 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6848 struct trace_iterator *iter = filp->private_data;
6849 struct splice_pipe_desc spd = {
6851 .partial = partial_def,
6852 .nr_pages = 0, /* This gets updated below. */
6853 .nr_pages_max = PIPE_DEF_BUFFERS,
6854 .ops = &default_pipe_buf_ops,
6855 .spd_release = tracing_spd_release_pipe,
6861 if (splice_grow_spd(pipe, &spd))
6864 mutex_lock(&iter->mutex);
6866 if (iter->trace->splice_read) {
6867 ret = iter->trace->splice_read(iter, filp,
6868 ppos, pipe, len, flags);
6873 ret = tracing_wait_pipe(filp);
6877 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6882 trace_event_read_lock();
6883 trace_access_lock(iter->cpu_file);
6885 /* Fill as many pages as possible. */
6886 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6887 spd.pages[i] = alloc_page(GFP_KERNEL);
6891 rem = tracing_fill_pipe_page(rem, iter);
6893 /* Copy the data into the page, so we can start over. */
6894 ret = trace_seq_to_buffer(&iter->seq,
6895 page_address(spd.pages[i]),
6896 trace_seq_used(&iter->seq));
6898 __free_page(spd.pages[i]);
6901 spd.partial[i].offset = 0;
6902 spd.partial[i].len = trace_seq_used(&iter->seq);
6904 trace_seq_init(&iter->seq);
6907 trace_access_unlock(iter->cpu_file);
6908 trace_event_read_unlock();
6909 mutex_unlock(&iter->mutex);
6914 ret = splice_to_pipe(pipe, &spd);
6918 splice_shrink_spd(&spd);
6922 mutex_unlock(&iter->mutex);
6927 tracing_entries_read(struct file *filp, char __user *ubuf,
6928 size_t cnt, loff_t *ppos)
6930 struct inode *inode = file_inode(filp);
6931 struct trace_array *tr = inode->i_private;
6932 int cpu = tracing_get_cpu(inode);
6937 mutex_lock(&trace_types_lock);
6939 if (cpu == RING_BUFFER_ALL_CPUS) {
6940 int cpu, buf_size_same;
6945 /* check if all cpu sizes are the same */
6946 for_each_tracing_cpu(cpu) {
6947 /* fill in the size from the first enabled cpu */
6949 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6950 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6956 if (buf_size_same) {
6957 if (!ring_buffer_expanded)
6958 r = sprintf(buf, "%lu (expanded: %lu)\n",
6960 trace_buf_size >> 10);
6962 r = sprintf(buf, "%lu\n", size >> 10);
6964 r = sprintf(buf, "X\n");
6966 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6968 mutex_unlock(&trace_types_lock);
6970 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6975 tracing_entries_write(struct file *filp, const char __user *ubuf,
6976 size_t cnt, loff_t *ppos)
6978 struct inode *inode = file_inode(filp);
6979 struct trace_array *tr = inode->i_private;
6983 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6987 /* must have at least 1 entry */
6991 /* value is in KB */
6993 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7003 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7004 size_t cnt, loff_t *ppos)
7006 struct trace_array *tr = filp->private_data;
7009 unsigned long size = 0, expanded_size = 0;
7011 mutex_lock(&trace_types_lock);
7012 for_each_tracing_cpu(cpu) {
7013 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7014 if (!ring_buffer_expanded)
7015 expanded_size += trace_buf_size >> 10;
7017 if (ring_buffer_expanded)
7018 r = sprintf(buf, "%lu\n", size);
7020 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7021 mutex_unlock(&trace_types_lock);
7023 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7027 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7028 size_t cnt, loff_t *ppos)
7031 * There is no need to read what the user has written; this function
7032 * just makes sure that there is no error when "echo" is used
7041 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7043 struct trace_array *tr = inode->i_private;
7045 /* disable tracing ? */
7046 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7047 tracer_tracing_off(tr);
7048 /* resize the ring buffer to 0 */
7049 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7051 trace_array_put(tr);
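/*
 * tracing_mark_write() handles writes to the "trace_marker" file. A
 * minimal user-space sketch of its use, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hello\n", 6);
 *		close(fd);
 *	}
 */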
7057 tracing_mark_write(struct file *filp, const char __user *ubuf,
7058 size_t cnt, loff_t *fpos)
7060 struct trace_array *tr = filp->private_data;
7061 struct ring_buffer_event *event;
7062 enum event_trigger_type tt = ETT_NONE;
7063 struct trace_buffer *buffer;
7064 struct print_entry *entry;
7069 /* Used in tracing_mark_raw_write() as well */
7070 #define FAULTED_STR "<faulted>"
7071 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7073 if (tracing_disabled)
7076 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7079 if (cnt > TRACE_BUF_SIZE)
7080 cnt = TRACE_BUF_SIZE;
7082 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7084 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7086 /* If the write is smaller than "<faulted>", make sure we can still add that */
7087 if (cnt < FAULTED_SIZE)
7088 size += FAULTED_SIZE - cnt;
7090 buffer = tr->array_buffer.buffer;
7091 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7093 if (unlikely(!event))
7094 /* Ring buffer disabled, return as if not open for write */
7097 entry = ring_buffer_event_data(event);
7098 entry->ip = _THIS_IP_;
7100 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7102 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7108 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7109 /* do not add \n before testing triggers, but add \0 */
7110 entry->buf[cnt] = '\0';
7111 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7114 if (entry->buf[cnt - 1] != '\n') {
7115 entry->buf[cnt] = '\n';
7116 entry->buf[cnt + 1] = '\0';
7118 entry->buf[cnt] = '\0';
7120 if (static_branch_unlikely(&trace_marker_exports_enabled))
7121 ftrace_exports(event, TRACE_EXPORT_MARKER);
7122 __buffer_unlock_commit(buffer, event);
7125 event_triggers_post_call(tr->trace_marker_file, tt);
7130 /* Limit it for now to 3K (including tag) */
7131 #define RAW_DATA_MAX_SIZE (1024*3)
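/*
 * Writes to "trace_marker_raw" are binary records that begin with a
 * 32-bit tag id (copied into entry->id below). A minimal user-space
 * sketch, assuming fd is an open descriptor for that file:
 *
 *	unsigned int rec[2] = { 0x1234, 42 };
 *
 *	write(fd, rec, sizeof(rec));
 */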
7134 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7135 size_t cnt, loff_t *fpos)
7137 struct trace_array *tr = filp->private_data;
7138 struct ring_buffer_event *event;
7139 struct trace_buffer *buffer;
7140 struct raw_data_entry *entry;
7145 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7147 if (tracing_disabled)
7150 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7153 /* The marker must at least have a tag id */
7154 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7157 if (cnt > TRACE_BUF_SIZE)
7158 cnt = TRACE_BUF_SIZE;
7160 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7162 size = sizeof(*entry) + cnt;
7163 if (cnt < FAULT_SIZE_ID)
7164 size += FAULT_SIZE_ID - cnt;
7166 buffer = tr->array_buffer.buffer;
7167 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7170 /* Ring buffer disabled, return as if not open for write */
7173 entry = ring_buffer_event_data(event);
7175 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7178 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7183 __buffer_unlock_commit(buffer, event);
7188 static int tracing_clock_show(struct seq_file *m, void *v)
7190 struct trace_array *tr = m->private;
7193 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7195 "%s%s%s%s", i ? " " : "",
7196 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7197 i == tr->clock_id ? "]" : "");
7203 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7208 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7211 if (i == ARRAY_SIZE(trace_clocks))
7214 mutex_lock(&trace_types_lock);
7218 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7221 * New clock may not be consistent with the previous clock.
7222 * Reset the buffer so that it doesn't have incomparable timestamps.
7224 tracing_reset_online_cpus(&tr->array_buffer);
7226 #ifdef CONFIG_TRACER_MAX_TRACE
7227 if (tr->max_buffer.buffer)
7228 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7229 tracing_reset_online_cpus(&tr->max_buffer);
7232 mutex_unlock(&trace_types_lock);
7237 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7238 size_t cnt, loff_t *fpos)
7240 struct seq_file *m = filp->private_data;
7241 struct trace_array *tr = m->private;
7243 const char *clockstr;
7246 if (cnt >= sizeof(buf))
7249 if (copy_from_user(buf, ubuf, cnt))
7254 clockstr = strstrip(buf);
7256 ret = tracing_set_clock(tr, clockstr);
7265 static int tracing_clock_open(struct inode *inode, struct file *file)
7267 struct trace_array *tr = inode->i_private;
7270 ret = tracing_check_open_get_tr(tr);
7274 ret = single_open(file, tracing_clock_show, inode->i_private);
7276 trace_array_put(tr);
7281 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7283 struct trace_array *tr = m->private;
7285 mutex_lock(&trace_types_lock);
7287 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7288 seq_puts(m, "delta [absolute]\n");
7290 seq_puts(m, "[delta] absolute\n");
7292 mutex_unlock(&trace_types_lock);
7297 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7299 struct trace_array *tr = inode->i_private;
7302 ret = tracing_check_open_get_tr(tr);
7306 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7308 trace_array_put(tr);
7313 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7315 if (rbe == this_cpu_read(trace_buffered_event))
7316 return ring_buffer_time_stamp(buffer);
7318 return ring_buffer_event_time_stamp(buffer, rbe);
7322 * Set or disable using the per CPU trace_buffered_event when possible.
7324 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7328 mutex_lock(&trace_types_lock);
7330 if (set && tr->no_filter_buffering_ref++)
7334 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7339 --tr->no_filter_buffering_ref;
7342 mutex_unlock(&trace_types_lock);
7347 struct ftrace_buffer_info {
7348 struct trace_iterator iter;
7350 unsigned int spare_cpu;
7354 #ifdef CONFIG_TRACER_SNAPSHOT
7355 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7357 struct trace_array *tr = inode->i_private;
7358 struct trace_iterator *iter;
7362 ret = tracing_check_open_get_tr(tr);
7366 if (file->f_mode & FMODE_READ) {
7367 iter = __tracing_open(inode, file, true);
7369 ret = PTR_ERR(iter);
7371 /* Writes still need the seq_file to hold the private data */
7373 m = kzalloc(sizeof(*m), GFP_KERNEL);
7376 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7384 iter->array_buffer = &tr->max_buffer;
7385 iter->cpu_file = tracing_get_cpu(inode);
7387 file->private_data = m;
7391 trace_array_put(tr);
7397 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7400 struct seq_file *m = filp->private_data;
7401 struct trace_iterator *iter = m->private;
7402 struct trace_array *tr = iter->tr;
7406 ret = tracing_update_buffers();
7410 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7414 mutex_lock(&trace_types_lock);
7416 if (tr->current_trace->use_max_tr) {
7421 arch_spin_lock(&tr->max_lock);
7422 if (tr->cond_snapshot)
7424 arch_spin_unlock(&tr->max_lock);
7430 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7434 if (tr->allocated_snapshot)
7438 /* Only allow per-cpu swap if the ring buffer supports it */
7439 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7440 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7445 if (tr->allocated_snapshot)
7446 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7447 &tr->array_buffer, iter->cpu_file);
7449 ret = tracing_alloc_snapshot_instance(tr);
7452 local_irq_disable();
7453 /* Now, we're going to swap */
7454 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7455 update_max_tr(tr, current, smp_processor_id(), NULL);
7457 update_max_tr_single(tr, current, iter->cpu_file);
7461 if (tr->allocated_snapshot) {
7462 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7463 tracing_reset_online_cpus(&tr->max_buffer);
7465 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7475 mutex_unlock(&trace_types_lock);
7479 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7481 struct seq_file *m = file->private_data;
7484 ret = tracing_release(inode, file);
7486 if (file->f_mode & FMODE_READ)
7489 /* If write only, the seq_file is just a stub */
7497 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7498 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7499 size_t count, loff_t *ppos);
7500 static int tracing_buffers_release(struct inode *inode, struct file *file);
7501 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7502 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7504 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7506 struct ftrace_buffer_info *info;
7509 /* The following checks for tracefs lockdown */
7510 ret = tracing_buffers_open(inode, filp);
7514 info = filp->private_data;
7516 if (info->iter.trace->use_max_tr) {
7517 tracing_buffers_release(inode, filp);
7521 info->iter.snapshot = true;
7522 info->iter.array_buffer = &info->iter.tr->max_buffer;
7527 #endif /* CONFIG_TRACER_SNAPSHOT */
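/*
 * Rough usage sketch for the per-instance "snapshot" file implemented
 * above; see Documentation/trace/ftrace.rst for the authoritative
 * description of the accepted values:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot   allocate if needed, then
 *	                                          swap in a snapshot
 *	# cat /sys/kernel/tracing/snapshot        read the snapshotted data
 *	# echo 0 > /sys/kernel/tracing/snapshot   free the snapshot buffer
 */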
7530 static const struct file_operations tracing_thresh_fops = {
7531 .open = tracing_open_generic,
7532 .read = tracing_thresh_read,
7533 .write = tracing_thresh_write,
7534 .llseek = generic_file_llseek,
7537 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7538 static const struct file_operations tracing_max_lat_fops = {
7539 .open = tracing_open_generic,
7540 .read = tracing_max_lat_read,
7541 .write = tracing_max_lat_write,
7542 .llseek = generic_file_llseek,
7546 static const struct file_operations set_tracer_fops = {
7547 .open = tracing_open_generic,
7548 .read = tracing_set_trace_read,
7549 .write = tracing_set_trace_write,
7550 .llseek = generic_file_llseek,
7553 static const struct file_operations tracing_pipe_fops = {
7554 .open = tracing_open_pipe,
7555 .poll = tracing_poll_pipe,
7556 .read = tracing_read_pipe,
7557 .splice_read = tracing_splice_read_pipe,
7558 .release = tracing_release_pipe,
7559 .llseek = no_llseek,
7562 static const struct file_operations tracing_entries_fops = {
7563 .open = tracing_open_generic_tr,
7564 .read = tracing_entries_read,
7565 .write = tracing_entries_write,
7566 .llseek = generic_file_llseek,
7567 .release = tracing_release_generic_tr,
7570 static const struct file_operations tracing_total_entries_fops = {
7571 .open = tracing_open_generic_tr,
7572 .read = tracing_total_entries_read,
7573 .llseek = generic_file_llseek,
7574 .release = tracing_release_generic_tr,
7577 static const struct file_operations tracing_free_buffer_fops = {
7578 .open = tracing_open_generic_tr,
7579 .write = tracing_free_buffer_write,
7580 .release = tracing_free_buffer_release,
7583 static const struct file_operations tracing_mark_fops = {
7584 .open = tracing_mark_open,
7585 .write = tracing_mark_write,
7586 .release = tracing_release_generic_tr,
7589 static const struct file_operations tracing_mark_raw_fops = {
7590 .open = tracing_mark_open,
7591 .write = tracing_mark_raw_write,
7592 .release = tracing_release_generic_tr,
7595 static const struct file_operations trace_clock_fops = {
7596 .open = tracing_clock_open,
7598 .llseek = seq_lseek,
7599 .release = tracing_single_release_tr,
7600 .write = tracing_clock_write,
7603 static const struct file_operations trace_time_stamp_mode_fops = {
7604 .open = tracing_time_stamp_mode_open,
7606 .llseek = seq_lseek,
7607 .release = tracing_single_release_tr,
7610 #ifdef CONFIG_TRACER_SNAPSHOT
7611 static const struct file_operations snapshot_fops = {
7612 .open = tracing_snapshot_open,
7614 .write = tracing_snapshot_write,
7615 .llseek = tracing_lseek,
7616 .release = tracing_snapshot_release,
7619 static const struct file_operations snapshot_raw_fops = {
7620 .open = snapshot_raw_open,
7621 .read = tracing_buffers_read,
7622 .release = tracing_buffers_release,
7623 .splice_read = tracing_buffers_splice_read,
7624 .llseek = no_llseek,
7627 #endif /* CONFIG_TRACER_SNAPSHOT */
7630 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7631 * @filp: The active open file structure
7632 * @ubuf: The userspace provided buffer to read the value from
7633 * @cnt: The maximum number of bytes to read
7634 * @ppos: The current "file" position
7636 * This function implements the write interface for a struct trace_min_max_param.
7637 * The filp->private_data must point to a trace_min_max_param structure that
7638 * defines where to write the value, the min and the max acceptable values,
7639 * and a lock to protect the write.
7642 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7644 struct trace_min_max_param *param = filp->private_data;
7651 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7656 mutex_lock(param->lock);
7658 if (param->min && val < *param->min)
7661 if (param->max && val > *param->max)
7668 mutex_unlock(param->lock);
7677 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7678 * @filp: The active open file structure
7679 * @ubuf: The userspace provided buffer to read value into
7680 * @cnt: The maximum number of bytes to read
7681 * @ppos: The current "file" position
7683 * This function implements the read interface for a struct trace_min_max_param.
7684 * The filp->private_data must point to a trace_min_max_param struct with valid data.
7688 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7690 struct trace_min_max_param *param = filp->private_data;
7691 char buf[U64_STR_SIZE];
7700 if (cnt > sizeof(buf))
7703 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7705 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7708 const struct file_operations trace_min_max_fops = {
7709 .open = tracing_open_generic,
7710 .read = trace_min_max_read,
7711 .write = trace_min_max_write,
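/*
 * Minimal usage sketch for the min/max interface above.  The "my_*"
 * names are invented, and the .val member name is an assumption; the
 * comments above only state that the structure supplies the value
 * location, the min/max bounds and a lock.
 *
 *	static u64 my_val, my_min = 1, my_max = 1000;
 *	static DEFINE_MUTEX(my_mutex);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_mutex,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */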
7714 #define TRACING_LOG_ERRS_MAX 8
7715 #define TRACING_LOG_LOC_MAX 128
7717 #define CMD_PREFIX " Command: "
7720 const char **errs; /* ptr to loc-specific array of err strings */
7721 u8 type; /* index into errs -> specific err string */
7722 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7726 struct tracing_log_err {
7727 struct list_head list;
7728 struct err_info info;
7729 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7730 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7733 static DEFINE_MUTEX(tracing_err_log_lock);
7735 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7737 struct tracing_log_err *err;
7739 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7740 err = kzalloc(sizeof(*err), GFP_KERNEL);
7742 err = ERR_PTR(-ENOMEM);
7743 tr->n_err_log_entries++;
7748 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7749 list_del(&err->list);
7755 * err_pos - find the position of a string within a command for error careting
7756 * @cmd: The tracing command that caused the error
7757 * @str: The string to position the caret at within @cmd
7759 * Finds the position of the first occurrence of @str within @cmd. The
7760 * return value can be passed to tracing_log_err() for caret placement
7763 * Returns the index within @cmd of the first occurrence of @str or 0
7764 * if @str was not found.
7766 unsigned int err_pos(char *cmd, const char *str)
7770 if (WARN_ON(!strlen(cmd)))
7773 found = strstr(cmd, str);
7781 * tracing_log_err - write an error to the tracing error log
7782 * @tr: The associated trace array for the error (NULL for top level array)
7783 * @loc: A string describing where the error occurred
7784 * @cmd: The tracing command that caused the error
7785 * @errs: The array of loc-specific static error strings
7786 * @type: The index into errs[], which produces the specific static err string
7787 * @pos: The position the caret should be placed in the cmd
7789 * Writes an error into tracing/error_log of the form:
7791 * <loc>: error: <text>
7795 * tracing/error_log is a small log file containing the last
7796 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7797 * unless there has been a tracing error, and the error log can be
7798 * cleared and have its memory freed by writing the empty string in
7799 * truncation mode to it, i.e. echo > tracing/error_log.
7801 * NOTE: the @errs array and the @type param are used to
7802 * produce a static error string - this string is not copied and saved
7803 * when the error is logged - only a pointer to it is saved. See
7804 * existing callers for examples of how static strings are typically
7805 * defined for use with tracing_log_err().
7807 void tracing_log_err(struct trace_array *tr,
7808 const char *loc, const char *cmd,
7809 const char **errs, u8 type, u8 pos)
7811 struct tracing_log_err *err;
7816 mutex_lock(&tracing_err_log_lock);
7817 err = get_tracing_log_err(tr);
7818 if (PTR_ERR(err) == -ENOMEM) {
7819 mutex_unlock(&tracing_err_log_lock);
7823 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7824 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7826 err->info.errs = errs;
7827 err->info.type = type;
7828 err->info.pos = pos;
7829 err->info.ts = local_clock();
7831 list_add_tail(&err->list, &tr->err_log);
7832 mutex_unlock(&tracing_err_log_lock);
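/*
 * Sketch of a typical tracing_log_err() caller (the strings and enum
 * below are hypothetical; see the real callers for how the static
 * error-string arrays are defined):
 *
 *	static const char *my_errs[] = { "Duplicate field name", ... };
 *	enum { ERR_DUP_FIELD };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, ERR_DUP_FIELD,
 *			err_pos(cmd, bad_token));
 *
 * This appends a "<loc>: error: <text>" line plus the offending command
 * to tracing/error_log, with a caret placed under @pos.
 */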
7835 static void clear_tracing_err_log(struct trace_array *tr)
7837 struct tracing_log_err *err, *next;
7839 mutex_lock(&tracing_err_log_lock);
7840 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7841 list_del(&err->list);
7845 tr->n_err_log_entries = 0;
7846 mutex_unlock(&tracing_err_log_lock);
7849 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7851 struct trace_array *tr = m->private;
7853 mutex_lock(&tracing_err_log_lock);
7855 return seq_list_start(&tr->err_log, *pos);
7858 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7860 struct trace_array *tr = m->private;
7862 return seq_list_next(v, &tr->err_log, pos);
7865 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7867 mutex_unlock(&tracing_err_log_lock);
7870 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7874 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7876 for (i = 0; i < pos; i++)
7881 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7883 struct tracing_log_err *err = v;
7886 const char *err_text = err->info.errs[err->info.type];
7887 u64 sec = err->info.ts;
7890 nsec = do_div(sec, NSEC_PER_SEC);
7891 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7892 err->loc, err_text);
7893 seq_printf(m, "%s", err->cmd);
7894 tracing_err_log_show_pos(m, err->info.pos);
7900 static const struct seq_operations tracing_err_log_seq_ops = {
7901 .start = tracing_err_log_seq_start,
7902 .next = tracing_err_log_seq_next,
7903 .stop = tracing_err_log_seq_stop,
7904 .show = tracing_err_log_seq_show
7907 static int tracing_err_log_open(struct inode *inode, struct file *file)
7909 struct trace_array *tr = inode->i_private;
7912 ret = tracing_check_open_get_tr(tr);
7916 /* If this file was opened for write, then erase contents */
7917 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7918 clear_tracing_err_log(tr);
7920 if (file->f_mode & FMODE_READ) {
7921 ret = seq_open(file, &tracing_err_log_seq_ops);
7923 struct seq_file *m = file->private_data;
7926 trace_array_put(tr);
7932 static ssize_t tracing_err_log_write(struct file *file,
7933 const char __user *buffer,
7934 size_t count, loff_t *ppos)
7939 static int tracing_err_log_release(struct inode *inode, struct file *file)
7941 struct trace_array *tr = inode->i_private;
7943 trace_array_put(tr);
7945 if (file->f_mode & FMODE_READ)
7946 seq_release(inode, file);
7951 static const struct file_operations tracing_err_log_fops = {
7952 .open = tracing_err_log_open,
7953 .write = tracing_err_log_write,
7955 .llseek = seq_lseek,
7956 .release = tracing_err_log_release,
7959 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7961 struct trace_array *tr = inode->i_private;
7962 struct ftrace_buffer_info *info;
7965 ret = tracing_check_open_get_tr(tr);
7969 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7971 trace_array_put(tr);
7975 mutex_lock(&trace_types_lock);
7978 info->iter.cpu_file = tracing_get_cpu(inode);
7979 info->iter.trace = tr->current_trace;
7980 info->iter.array_buffer = &tr->array_buffer;
7982 /* Force reading ring buffer for first read */
7983 info->read = (unsigned int)-1;
7985 filp->private_data = info;
7989 mutex_unlock(&trace_types_lock);
7991 ret = nonseekable_open(inode, filp);
7993 trace_array_put(tr);
7999 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8001 struct ftrace_buffer_info *info = filp->private_data;
8002 struct trace_iterator *iter = &info->iter;
8004 return trace_poll(iter, filp, poll_table);
8008 tracing_buffers_read(struct file *filp, char __user *ubuf,
8009 size_t count, loff_t *ppos)
8011 struct ftrace_buffer_info *info = filp->private_data;
8012 struct trace_iterator *iter = &info->iter;
8019 #ifdef CONFIG_TRACER_MAX_TRACE
8020 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8025 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8027 if (IS_ERR(info->spare)) {
8028 ret = PTR_ERR(info->spare);
8031 info->spare_cpu = iter->cpu_file;
8037 /* Do we have previous read data to read? */
8038 if (info->read < PAGE_SIZE)
8042 trace_access_lock(iter->cpu_file);
8043 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8047 trace_access_unlock(iter->cpu_file);
8050 if (trace_empty(iter)) {
8051 if ((filp->f_flags & O_NONBLOCK))
8054 ret = wait_on_pipe(iter, 0);
8065 size = PAGE_SIZE - info->read;
8069 ret = copy_to_user(ubuf, info->spare + info->read, size);
8081 static int tracing_buffers_release(struct inode *inode, struct file *file)
8083 struct ftrace_buffer_info *info = file->private_data;
8084 struct trace_iterator *iter = &info->iter;
8086 mutex_lock(&trace_types_lock);
8088 iter->tr->trace_ref--;
8090 __trace_array_put(iter->tr);
8093 ring_buffer_free_read_page(iter->array_buffer->buffer,
8094 info->spare_cpu, info->spare);
8097 mutex_unlock(&trace_types_lock);
8103 struct trace_buffer *buffer;
8106 refcount_t refcount;
8109 static void buffer_ref_release(struct buffer_ref *ref)
8111 if (!refcount_dec_and_test(&ref->refcount))
8113 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8117 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8118 struct pipe_buffer *buf)
8120 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8122 buffer_ref_release(ref);
8126 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8127 struct pipe_buffer *buf)
8129 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8131 if (refcount_read(&ref->refcount) > INT_MAX/2)
8134 refcount_inc(&ref->refcount);
8138 /* Pipe buffer operations for a buffer. */
8139 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8140 .release = buffer_pipe_buf_release,
8141 .get = buffer_pipe_buf_get,
8145 * Callback from splice_to_pipe(), if we need to release some pages
8146 * at the end of the spd in case we errored out in filling the pipe.
8148 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8150 struct buffer_ref *ref =
8151 (struct buffer_ref *)spd->partial[i].private;
8153 buffer_ref_release(ref);
8154 spd->partial[i].private = 0;
8158 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8159 struct pipe_inode_info *pipe, size_t len,
8162 struct ftrace_buffer_info *info = file->private_data;
8163 struct trace_iterator *iter = &info->iter;
8164 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8165 struct page *pages_def[PIPE_DEF_BUFFERS];
8166 struct splice_pipe_desc spd = {
8168 .partial = partial_def,
8169 .nr_pages_max = PIPE_DEF_BUFFERS,
8170 .ops = &buffer_pipe_buf_ops,
8171 .spd_release = buffer_spd_release,
8173 struct buffer_ref *ref;
8177 #ifdef CONFIG_TRACER_MAX_TRACE
8178 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8182 if (*ppos & (PAGE_SIZE - 1))
8185 if (len & (PAGE_SIZE - 1)) {
8186 if (len < PAGE_SIZE)
8191 if (splice_grow_spd(pipe, &spd))
8195 trace_access_lock(iter->cpu_file);
8196 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8198 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8202 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8208 refcount_set(&ref->refcount, 1);
8209 ref->buffer = iter->array_buffer->buffer;
8210 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8211 if (IS_ERR(ref->page)) {
8212 ret = PTR_ERR(ref->page);
8217 ref->cpu = iter->cpu_file;
8219 r = ring_buffer_read_page(ref->buffer, &ref->page,
8220 len, iter->cpu_file, 1);
8222 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8228 page = virt_to_page(ref->page);
8230 spd.pages[i] = page;
8231 spd.partial[i].len = PAGE_SIZE;
8232 spd.partial[i].offset = 0;
8233 spd.partial[i].private = (unsigned long)ref;
8237 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8240 trace_access_unlock(iter->cpu_file);
8243 /* did we read anything? */
8244 if (!spd.nr_pages) {
8249 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8252 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8259 ret = splice_to_pipe(pipe, &spd);
8261 splice_shrink_spd(&spd);
8266 static const struct file_operations tracing_buffers_fops = {
8267 .open = tracing_buffers_open,
8268 .read = tracing_buffers_read,
8269 .poll = tracing_buffers_poll,
8270 .release = tracing_buffers_release,
8271 .splice_read = tracing_buffers_splice_read,
8272 .llseek = no_llseek,
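/*
 * Usage sketch for per_cpu/cpuN/trace_pipe_raw (path assumes the usual
 * tracefs mount point): whole ring-buffer pages can be spliced into a
 * pipe without copying, as long as offsets are page aligned and the
 * requested length is at least PAGE_SIZE:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *
 *	splice(fd, NULL, pipe_wr_fd, NULL, PAGE_SIZE, SPLICE_F_NONBLOCK);
 */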
8276 tracing_stats_read(struct file *filp, char __user *ubuf,
8277 size_t count, loff_t *ppos)
8279 struct inode *inode = file_inode(filp);
8280 struct trace_array *tr = inode->i_private;
8281 struct array_buffer *trace_buf = &tr->array_buffer;
8282 int cpu = tracing_get_cpu(inode);
8283 struct trace_seq *s;
8285 unsigned long long t;
8286 unsigned long usec_rem;
8288 s = kmalloc(sizeof(*s), GFP_KERNEL);
8294 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8295 trace_seq_printf(s, "entries: %ld\n", cnt);
8297 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8298 trace_seq_printf(s, "overrun: %ld\n", cnt);
8300 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8301 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8303 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8304 trace_seq_printf(s, "bytes: %ld\n", cnt);
8306 if (trace_clocks[tr->clock_id].in_ns) {
8307 /* local or global for trace_clock */
8308 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8309 usec_rem = do_div(t, USEC_PER_SEC);
8310 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8313 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8314 usec_rem = do_div(t, USEC_PER_SEC);
8315 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8317 /* counter or tsc mode for trace_clock */
8318 trace_seq_printf(s, "oldest event ts: %llu\n",
8319 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8321 trace_seq_printf(s, "now ts: %llu\n",
8322 ring_buffer_time_stamp(trace_buf->buffer));
8325 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8326 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8328 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8329 trace_seq_printf(s, "read events: %ld\n", cnt);
8331 count = simple_read_from_buffer(ubuf, count, ppos,
8332 s->buffer, trace_seq_used(s));
8339 static const struct file_operations tracing_stats_fops = {
8340 .open = tracing_open_generic_tr,
8341 .read = tracing_stats_read,
8342 .llseek = generic_file_llseek,
8343 .release = tracing_release_generic_tr,
8346 #ifdef CONFIG_DYNAMIC_FTRACE
8349 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8350 size_t cnt, loff_t *ppos)
8356 /* 256 should be plenty to hold the amount needed */
8357 buf = kmalloc(256, GFP_KERNEL);
8361 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8362 ftrace_update_tot_cnt,
8363 ftrace_number_of_pages,
8364 ftrace_number_of_groups);
8366 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8371 static const struct file_operations tracing_dyn_info_fops = {
8372 .open = tracing_open_generic,
8373 .read = tracing_read_dyn_info,
8374 .llseek = generic_file_llseek,
8376 #endif /* CONFIG_DYNAMIC_FTRACE */
8378 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8380 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8381 struct trace_array *tr, struct ftrace_probe_ops *ops,
8384 tracing_snapshot_instance(tr);
8388 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8389 struct trace_array *tr, struct ftrace_probe_ops *ops,
8392 struct ftrace_func_mapper *mapper = data;
8396 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8406 tracing_snapshot_instance(tr);
8410 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8411 struct ftrace_probe_ops *ops, void *data)
8413 struct ftrace_func_mapper *mapper = data;
8416 seq_printf(m, "%ps:", (void *)ip);
8418 seq_puts(m, "snapshot");
8421 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8424 seq_printf(m, ":count=%ld\n", *count);
8426 seq_puts(m, ":unlimited\n");
8432 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8433 unsigned long ip, void *init_data, void **data)
8435 struct ftrace_func_mapper *mapper = *data;
8438 mapper = allocate_ftrace_func_mapper();
8444 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8448 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8449 unsigned long ip, void *data)
8451 struct ftrace_func_mapper *mapper = data;
8456 free_ftrace_func_mapper(mapper, NULL);
8460 ftrace_func_mapper_remove_ip(mapper, ip);
8463 static struct ftrace_probe_ops snapshot_probe_ops = {
8464 .func = ftrace_snapshot,
8465 .print = ftrace_snapshot_print,
8468 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8469 .func = ftrace_count_snapshot,
8470 .print = ftrace_snapshot_print,
8471 .init = ftrace_snapshot_init,
8472 .free = ftrace_snapshot_free,
8476 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8477 char *glob, char *cmd, char *param, int enable)
8479 struct ftrace_probe_ops *ops;
8480 void *count = (void *)-1;
8487 /* hash funcs only work with set_ftrace_filter */
8491 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8494 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8499 number = strsep(¶m, ":");
8501 if (!strlen(number))
8505 * We use the callback data field (which is a pointer) as our counter.
8508 ret = kstrtoul(number, 0, (unsigned long *)&count);
8513 ret = tracing_alloc_snapshot_instance(tr);
8517 ret = register_ftrace_function_probe(glob, tr, ops, count);
8520 return ret < 0 ? ret : 0;
8523 static struct ftrace_func_command ftrace_snapshot_cmd = {
8525 .func = ftrace_trace_snapshot_callback,
8528 static __init int register_snapshot_cmd(void)
8530 return register_ftrace_command(&ftrace_snapshot_cmd);
8533 static inline __init int register_snapshot_cmd(void) { return 0; }
8534 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
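/*
 * Usage sketch for the "snapshot" function command registered above
 * (written to set_ftrace_filter; "do_sys_open" is just an example
 * function name):
 *
 *	# echo 'do_sys_open:snapshot' > set_ftrace_filter
 *	# echo 'do_sys_open:snapshot:3' > set_ftrace_filter   limit to 3 hits
 *	# echo '!do_sys_open:snapshot' > set_ftrace_filter    remove the probe
 */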
8536 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8538 if (WARN_ON(!tr->dir))
8539 return ERR_PTR(-ENODEV);
8541 /* Top directory uses NULL as the parent */
8542 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8545 /* All sub buffers have a descriptor */
8549 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8551 struct dentry *d_tracer;
8554 return tr->percpu_dir;
8556 d_tracer = tracing_get_dentry(tr);
8557 if (IS_ERR(d_tracer))
8560 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8562 MEM_FAIL(!tr->percpu_dir,
8563 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8565 return tr->percpu_dir;
8568 static struct dentry *
8569 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8570 void *data, long cpu, const struct file_operations *fops)
8572 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8574 if (ret) /* See tracing_get_cpu() */
8575 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8580 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8582 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8583 struct dentry *d_cpu;
8584 char cpu_dir[30]; /* 30 characters should be more than enough */
8589 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8590 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8592 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8596 /* per cpu trace_pipe */
8597 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8598 tr, cpu, &tracing_pipe_fops);
8601 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8602 tr, cpu, &tracing_fops);
8604 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8605 tr, cpu, &tracing_buffers_fops);
8607 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8608 tr, cpu, &tracing_stats_fops);
8610 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8611 tr, cpu, &tracing_entries_fops);
8613 #ifdef CONFIG_TRACER_SNAPSHOT
8614 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8615 tr, cpu, &snapshot_fops);
8617 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8618 tr, cpu, &snapshot_raw_fops);
8622 #ifdef CONFIG_FTRACE_SELFTEST
8623 /* Let selftest have access to static functions in this file */
8624 #include "trace_selftest.c"
8628 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8631 struct trace_option_dentry *topt = filp->private_data;
8634 if (topt->flags->val & topt->opt->bit)
8639 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8643 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8646 struct trace_option_dentry *topt = filp->private_data;
8650 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8654 if (val != 0 && val != 1)
8657 if (!!(topt->flags->val & topt->opt->bit) != val) {
8658 mutex_lock(&trace_types_lock);
8659 ret = __set_tracer_option(topt->tr, topt->flags,
8661 mutex_unlock(&trace_types_lock);
8672 static const struct file_operations trace_options_fops = {
8673 .open = tracing_open_generic,
8674 .read = trace_options_read,
8675 .write = trace_options_write,
8676 .llseek = generic_file_llseek,
8680 * In order to pass in both the trace_array descriptor as well as the index
8681 * to the flag that the trace option file represents, the trace_array
8682 * has a character array of trace_flags_index[], which holds the index
8683 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8684 * The address of this character array is passed to the flag option file
8685 * read/write callbacks.
8687 * In order to extract both the index and the trace_array descriptor,
8688 * get_tr_index() uses the following algorithm.
8692 * As the pointer itself contains the address of the index (remember index[n] == n), the index is simply idx = *ptr.
8695 * Then to get the trace_array descriptor, by subtracting that index
8696 * from the ptr, we get to the start of the index itself.
8698 * ptr - idx == &index[0]
8700 * Then a simple container_of() from that pointer gets us to the
8701 * trace_array descriptor.
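 *
 * Worked example (values invented for illustration): if @data points at
 * tr->trace_flags_index[3], then *data == 3, data - 3 lands on
 * &tr->trace_flags_index[0], and container_of() on that address recovers
 * the enclosing trace_array.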
8703 static void get_tr_index(void *data, struct trace_array **ptr,
8704 unsigned int *pindex)
8706 *pindex = *(unsigned char *)data;
8708 *ptr = container_of(data - *pindex, struct trace_array,
8713 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8716 void *tr_index = filp->private_data;
8717 struct trace_array *tr;
8721 get_tr_index(tr_index, &tr, &index);
8723 if (tr->trace_flags & (1 << index))
8728 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8732 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8735 void *tr_index = filp->private_data;
8736 struct trace_array *tr;
8741 get_tr_index(tr_index, &tr, &index);
8743 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8747 if (val != 0 && val != 1)
8750 mutex_lock(&event_mutex);
8751 mutex_lock(&trace_types_lock);
8752 ret = set_tracer_flag(tr, 1 << index, val);
8753 mutex_unlock(&trace_types_lock);
8754 mutex_unlock(&event_mutex);
8764 static const struct file_operations trace_options_core_fops = {
8765 .open = tracing_open_generic,
8766 .read = trace_options_core_read,
8767 .write = trace_options_core_write,
8768 .llseek = generic_file_llseek,
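/*
 * The fops above back the boolean files created under the per-instance
 * "options" directory, one file per flag in trace_options[]:
 *
 *	# echo 1 > /sys/kernel/tracing/options/<flag>
 *	# echo 0 > /sys/kernel/tracing/options/<flag>
 */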
8771 struct dentry *trace_create_file(const char *name,
8773 struct dentry *parent,
8775 const struct file_operations *fops)
8779 ret = tracefs_create_file(name, mode, parent, data, fops);
8781 pr_warn("Could not create tracefs '%s' entry\n", name);
8787 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8789 struct dentry *d_tracer;
8794 d_tracer = tracing_get_dentry(tr);
8795 if (IS_ERR(d_tracer))
8798 tr->options = tracefs_create_dir("options", d_tracer);
8800 pr_warn("Could not create tracefs directory 'options'\n");
8808 create_trace_option_file(struct trace_array *tr,
8809 struct trace_option_dentry *topt,
8810 struct tracer_flags *flags,
8811 struct tracer_opt *opt)
8813 struct dentry *t_options;
8815 t_options = trace_options_init_dentry(tr);
8819 topt->flags = flags;
8823 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8824 t_options, topt, &trace_options_fops);
8829 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8831 struct trace_option_dentry *topts;
8832 struct trace_options *tr_topts;
8833 struct tracer_flags *flags;
8834 struct tracer_opt *opts;
8841 flags = tracer->flags;
8843 if (!flags || !flags->opts)
8847 * If this is an instance, only create flags for tracers
8848 * the instance may have.
8850 if (!trace_ok_for_array(tracer, tr))
8853 for (i = 0; i < tr->nr_topts; i++) {
8854 /* Make sure there are no duplicate flags. */
8855 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8861 for (cnt = 0; opts[cnt].name; cnt++)
8864 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8868 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8875 tr->topts = tr_topts;
8876 tr->topts[tr->nr_topts].tracer = tracer;
8877 tr->topts[tr->nr_topts].topts = topts;
8880 for (cnt = 0; opts[cnt].name; cnt++) {
8881 create_trace_option_file(tr, &topts[cnt], flags,
8883 MEM_FAIL(topts[cnt].entry == NULL,
8884 "Failed to create trace option: %s",
8889 static struct dentry *
8890 create_trace_option_core_file(struct trace_array *tr,
8891 const char *option, long index)
8893 struct dentry *t_options;
8895 t_options = trace_options_init_dentry(tr);
8899 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8900 (void *)&tr->trace_flags_index[index],
8901 &trace_options_core_fops);
8904 static void create_trace_options_dir(struct trace_array *tr)
8906 struct dentry *t_options;
8907 bool top_level = tr == &global_trace;
8910 t_options = trace_options_init_dentry(tr);
8914 for (i = 0; trace_options[i]; i++) {
8916 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8917 create_trace_option_core_file(tr, trace_options[i], i);
8922 rb_simple_read(struct file *filp, char __user *ubuf,
8923 size_t cnt, loff_t *ppos)
8925 struct trace_array *tr = filp->private_data;
8929 r = tracer_tracing_is_on(tr);
8930 r = sprintf(buf, "%d\n", r);
8932 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8936 rb_simple_write(struct file *filp, const char __user *ubuf,
8937 size_t cnt, loff_t *ppos)
8939 struct trace_array *tr = filp->private_data;
8940 struct trace_buffer *buffer = tr->array_buffer.buffer;
8944 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8949 mutex_lock(&trace_types_lock);
8950 if (!!val == tracer_tracing_is_on(tr)) {
8951 val = 0; /* do nothing */
8953 tracer_tracing_on(tr);
8954 if (tr->current_trace->start)
8955 tr->current_trace->start(tr);
8957 tracer_tracing_off(tr);
8958 if (tr->current_trace->stop)
8959 tr->current_trace->stop(tr);
8961 mutex_unlock(&trace_types_lock);
8969 static const struct file_operations rb_simple_fops = {
8970 .open = tracing_open_generic_tr,
8971 .read = rb_simple_read,
8972 .write = rb_simple_write,
8973 .release = tracing_release_generic_tr,
8974 .llseek = default_llseek,
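/*
 * The fops above back the per-instance "tracing_on" file, e.g.:
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on   pause writing to the buffer
 *	# echo 1 > /sys/kernel/tracing/tracing_on   resume
 */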
8978 buffer_percent_read(struct file *filp, char __user *ubuf,
8979 size_t cnt, loff_t *ppos)
8981 struct trace_array *tr = filp->private_data;
8985 r = tr->buffer_percent;
8986 r = sprintf(buf, "%d\n", r);
8988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8992 buffer_percent_write(struct file *filp, const char __user *ubuf,
8993 size_t cnt, loff_t *ppos)
8995 struct trace_array *tr = filp->private_data;
8999 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9009 tr->buffer_percent = val;
9016 static const struct file_operations buffer_percent_fops = {
9017 .open = tracing_open_generic_tr,
9018 .read = buffer_percent_read,
9019 .write = buffer_percent_write,
9020 .release = tracing_release_generic_tr,
9021 .llseek = default_llseek,
9024 static struct dentry *trace_instance_dir;
9027 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9030 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9032 enum ring_buffer_flags rb_flags;
9034 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9038 buf->buffer = ring_buffer_alloc(size, rb_flags);
9042 buf->data = alloc_percpu(struct trace_array_cpu);
9044 ring_buffer_free(buf->buffer);
9049 /* Allocate the first page for all buffers */
9050 set_buffer_entries(&tr->array_buffer,
9051 ring_buffer_size(tr->array_buffer.buffer, 0));
9056 static int allocate_trace_buffers(struct trace_array *tr, int size)
9060 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9064 #ifdef CONFIG_TRACER_MAX_TRACE
9065 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9066 allocate_snapshot ? size : 1);
9067 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9068 ring_buffer_free(tr->array_buffer.buffer);
9069 tr->array_buffer.buffer = NULL;
9070 free_percpu(tr->array_buffer.data);
9071 tr->array_buffer.data = NULL;
9074 tr->allocated_snapshot = allocate_snapshot;
9077 * Only the top level trace array gets its snapshot allocated
9078 * from the kernel command line.
9080 allocate_snapshot = false;
9086 static void free_trace_buffer(struct array_buffer *buf)
9089 ring_buffer_free(buf->buffer);
9091 free_percpu(buf->data);
9096 static void free_trace_buffers(struct trace_array *tr)
9101 free_trace_buffer(&tr->array_buffer);
9103 #ifdef CONFIG_TRACER_MAX_TRACE
9104 free_trace_buffer(&tr->max_buffer);
9108 static void init_trace_flags_index(struct trace_array *tr)
9112 /* Used by the trace options files */
9113 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9114 tr->trace_flags_index[i] = i;
9117 static void __update_tracer_options(struct trace_array *tr)
9121 for (t = trace_types; t; t = t->next)
9122 add_tracer_options(tr, t);
9125 static void update_tracer_options(struct trace_array *tr)
9127 mutex_lock(&trace_types_lock);
9128 __update_tracer_options(tr);
9129 mutex_unlock(&trace_types_lock);
9132 /* Must have trace_types_lock held */
9133 struct trace_array *trace_array_find(const char *instance)
9135 struct trace_array *tr, *found = NULL;
9137 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9138 if (tr->name && strcmp(tr->name, instance) == 0) {
9147 struct trace_array *trace_array_find_get(const char *instance)
9149 struct trace_array *tr;
9151 mutex_lock(&trace_types_lock);
9152 tr = trace_array_find(instance);
9155 mutex_unlock(&trace_types_lock);
9160 static int trace_array_create_dir(struct trace_array *tr)
9164 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9168 ret = event_trace_add_tracer(tr->dir, tr);
9170 tracefs_remove(tr->dir);
9174 init_tracer_tracefs(tr, tr->dir);
9175 __update_tracer_options(tr);
9180 static struct trace_array *trace_array_create(const char *name)
9182 struct trace_array *tr;
9186 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9188 return ERR_PTR(ret);
9190 tr->name = kstrdup(name, GFP_KERNEL);
9194 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9197 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9199 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9201 raw_spin_lock_init(&tr->start_lock);
9203 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9205 tr->current_trace = &nop_trace;
9207 INIT_LIST_HEAD(&tr->systems);
9208 INIT_LIST_HEAD(&tr->events);
9209 INIT_LIST_HEAD(&tr->hist_vars);
9210 INIT_LIST_HEAD(&tr->err_log);
9212 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9215 if (ftrace_allocate_ftrace_ops(tr) < 0)
9218 ftrace_init_trace_array(tr);
9220 init_trace_flags_index(tr);
9222 if (trace_instance_dir) {
9223 ret = trace_array_create_dir(tr);
9227 __trace_early_add_events(tr);
9229 list_add(&tr->list, &ftrace_trace_arrays);
9236 ftrace_free_ftrace_ops(tr);
9237 free_trace_buffers(tr);
9238 free_cpumask_var(tr->tracing_cpumask);
9242 return ERR_PTR(ret);
9245 static int instance_mkdir(const char *name)
9247 struct trace_array *tr;
9250 mutex_lock(&event_mutex);
9251 mutex_lock(&trace_types_lock);
9254 if (trace_array_find(name))
9257 tr = trace_array_create(name);
9259 ret = PTR_ERR_OR_ZERO(tr);
9262 mutex_unlock(&trace_types_lock);
9263 mutex_unlock(&event_mutex);
9268 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9269 * @name: The name of the trace array to be looked up/created.
9271 * Returns a pointer to the trace array with the given name, or NULL
9272 * if it cannot be created.
9274 * NOTE: This function increments the reference counter associated with the
9275 * trace array returned. This makes sure it cannot be freed while in use.
9276 * Use trace_array_put() once the trace array is no longer needed.
9277 * If the trace_array is to be freed, trace_array_destroy() needs to
9278 * be called after the trace_array_put(), or simply let user space delete
9279 * it from the tracefs instances directory. But until the
9280 * trace_array_put() is called, user space can not delete it.
9283 struct trace_array *trace_array_get_by_name(const char *name)
9285 struct trace_array *tr;
9287 mutex_lock(&event_mutex);
9288 mutex_lock(&trace_types_lock);
9290 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9291 if (tr->name && strcmp(tr->name, name) == 0)
9295 tr = trace_array_create(name);
9303 mutex_unlock(&trace_types_lock);
9304 mutex_unlock(&event_mutex);
9307 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
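/*
 * In-kernel usage sketch (the instance name is hypothetical), following
 * the reference rules spelled out in the comment above:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	 only if the instance should go away
 */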
9309 static int __remove_instance(struct trace_array *tr)
9313 /* Reference counter for a newly created trace array = 1. */
9314 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9317 list_del(&tr->list);
9319 /* Disable all the flags that were enabled coming in */
9320 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9321 if ((1 << i) & ZEROED_TRACE_FLAGS)
9322 set_tracer_flag(tr, 1 << i, 0);
9325 tracing_set_nop(tr);
9326 clear_ftrace_function_probes(tr);
9327 event_trace_del_tracer(tr);
9328 ftrace_clear_pids(tr);
9329 ftrace_destroy_function_files(tr);
9330 tracefs_remove(tr->dir);
9331 free_percpu(tr->last_func_repeats);
9332 free_trace_buffers(tr);
9334 for (i = 0; i < tr->nr_topts; i++) {
9335 kfree(tr->topts[i].topts);
9339 free_cpumask_var(tr->tracing_cpumask);
9346 int trace_array_destroy(struct trace_array *this_tr)
9348 struct trace_array *tr;
9354 mutex_lock(&event_mutex);
9355 mutex_lock(&trace_types_lock);
9359 /* Making sure trace array exists before destroying it. */
9360 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9361 if (tr == this_tr) {
9362 ret = __remove_instance(tr);
9367 mutex_unlock(&trace_types_lock);
9368 mutex_unlock(&event_mutex);
9372 EXPORT_SYMBOL_GPL(trace_array_destroy);
9374 static int instance_rmdir(const char *name)
9376 struct trace_array *tr;
9379 mutex_lock(&event_mutex);
9380 mutex_lock(&trace_types_lock);
9383 tr = trace_array_find(name);
9385 ret = __remove_instance(tr);
9387 mutex_unlock(&trace_types_lock);
9388 mutex_unlock(&event_mutex);
9393 static __init void create_trace_instances(struct dentry *d_tracer)
9395 struct trace_array *tr;
9397 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9400 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9403 mutex_lock(&event_mutex);
9404 mutex_lock(&trace_types_lock);
9406 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9409 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9410 "Failed to create instance directory\n"))
9414 mutex_unlock(&trace_types_lock);
9415 mutex_unlock(&event_mutex);
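/*
 * From user space, instance_mkdir()/instance_rmdir() above are reached
 * through the "instances" directory (path assumes the usual tracefs
 * mount point):
 *
 *	# mkdir /sys/kernel/tracing/instances/foo   new trace array "foo"
 *	# rmdir /sys/kernel/tracing/instances/foo   tear it down again
 */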
9419 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9421 struct trace_event_file *file;
9424 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9425 tr, &show_traces_fops);
9427 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9428 tr, &set_tracer_fops);
9430 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9431 tr, &tracing_cpumask_fops);
9433 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9434 tr, &tracing_iter_fops);
9436 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9439 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9440 tr, &tracing_pipe_fops);
9442 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9443 tr, &tracing_entries_fops);
9445 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9446 tr, &tracing_total_entries_fops);
9448 trace_create_file("free_buffer", 0200, d_tracer,
9449 tr, &tracing_free_buffer_fops);
9451 trace_create_file("trace_marker", 0220, d_tracer,
9452 tr, &tracing_mark_fops);
9454 file = __find_event_file(tr, "ftrace", "print");
9455 if (file && file->dir)
9456 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9457 file, &event_trigger_fops);
9458 tr->trace_marker_file = file;
9460 trace_create_file("trace_marker_raw", 0220, d_tracer,
9461 tr, &tracing_mark_raw_fops);
9463 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9466 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9467 tr, &rb_simple_fops);
9469 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9470 &trace_time_stamp_mode_fops);
9472 tr->buffer_percent = 50;
9474 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9475 tr, &buffer_percent_fops);
9477 create_trace_options_dir(tr);
9479 trace_create_maxlat_file(tr, d_tracer);
9481 if (ftrace_create_function_files(tr, d_tracer))
9482 MEM_FAIL(1, "Could not allocate function filter files");
9484 #ifdef CONFIG_TRACER_SNAPSHOT
9485 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9486 tr, &snapshot_fops);
9489 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9490 tr, &tracing_err_log_fops);
9492 for_each_tracing_cpu(cpu)
9493 tracing_init_tracefs_percpu(tr, cpu);
9495 ftrace_init_tracefs(tr, d_tracer);
9498 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9500 struct vfsmount *mnt;
9501 struct file_system_type *type;
9504 * To maintain backward compatibility for tools that mount
9505 * debugfs to get to the tracing facility, tracefs is automatically
9506 * mounted to the debugfs/tracing directory.
9508 type = get_fs_type("tracefs");
9511 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9512 put_filesystem(type);
9521 * tracing_init_dentry - initialize top level trace array
9523 * This is called when creating files or directories in the tracing
9524 * directory. It is called via fs_initcall() by any of the boot up code
9525 * and returns 0 once the top level tracing directory has been set up.
9527 int tracing_init_dentry(void)
9529 struct trace_array *tr = &global_trace;
9531 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9532 pr_warn("Tracing disabled due to lockdown\n");
9536 /* The top level trace array uses NULL as parent */
9540 if (WARN_ON(!tracefs_initialized()))
9544 * As there may still be users that expect the tracing
9545 * files to exist in debugfs/tracing, we must automount
9546 * the tracefs file system there, so older tools still
9547 * work with the newer kernel.
9549 tr->dir = debugfs_create_automount("tracing", NULL,
9550 trace_automount, NULL);
9555 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9556 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9558 static struct workqueue_struct *eval_map_wq __initdata;
9559 static struct work_struct eval_map_work __initdata;
9561 static void __init eval_map_work_func(struct work_struct *work)
9565 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9566 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9569 static int __init trace_eval_init(void)
9571 INIT_WORK(&eval_map_work, eval_map_work_func);
9573 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9575 pr_err("Unable to allocate eval_map_wq\n");
9577 eval_map_work_func(&eval_map_work);
9581 queue_work(eval_map_wq, &eval_map_work);
9585 static int __init trace_eval_sync(void)
9587 /* Make sure the eval map updates are finished */
9589 destroy_workqueue(eval_map_wq);
9593 late_initcall_sync(trace_eval_sync);
9596 #ifdef CONFIG_MODULES
9597 static void trace_module_add_evals(struct module *mod)
9599 if (!mod->num_trace_evals)
9603 * Modules with bad taint do not have events created, do
9604 * not bother with enums either.
9606 if (trace_module_has_bad_taint(mod))
9609 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9612 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9613 static void trace_module_remove_evals(struct module *mod)
9615 union trace_eval_map_item *map;
9616 union trace_eval_map_item **last = &trace_eval_maps;
9618 if (!mod->num_trace_evals)
9621 mutex_lock(&trace_eval_mutex);
9623 map = trace_eval_maps;
9626 if (map->head.mod == mod)
9628 map = trace_eval_jmp_to_tail(map);
9629 last = &map->tail.next;
9630 map = map->tail.next;
9635 *last = trace_eval_jmp_to_tail(map)->tail.next;
9638 mutex_unlock(&trace_eval_mutex);
9641 static inline void trace_module_remove_evals(struct module *mod) { }
9642 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9644 static int trace_module_notify(struct notifier_block *self,
9645 unsigned long val, void *data)
9647 struct module *mod = data;
9650 case MODULE_STATE_COMING:
9651 trace_module_add_evals(mod);
9653 case MODULE_STATE_GOING:
9654 trace_module_remove_evals(mod);
9661 static struct notifier_block trace_module_nb = {
9662 .notifier_call = trace_module_notify,
9665 #endif /* CONFIG_MODULES */
9667 static __init int tracer_init_tracefs(void)
9671 trace_access_lock_init();
9673 ret = tracing_init_dentry();
9679 init_tracer_tracefs(&global_trace, NULL);
9680 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9682 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9683 &global_trace, &tracing_thresh_fops);
9685 trace_create_file("README", TRACE_MODE_READ, NULL,
9686 NULL, &tracing_readme_fops);
9688 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9689 NULL, &tracing_saved_cmdlines_fops);
9691 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9692 NULL, &tracing_saved_cmdlines_size_fops);
9694 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9695 NULL, &tracing_saved_tgids_fops);
9699 trace_create_eval_file(NULL);
9701 #ifdef CONFIG_MODULES
9702 register_module_notifier(&trace_module_nb);
9705 #ifdef CONFIG_DYNAMIC_FTRACE
9706 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9707 NULL, &tracing_dyn_info_fops);
9710 create_trace_instances(NULL);
9712 update_tracer_options(&global_trace);
9717 fs_initcall(tracer_init_tracefs);
9719 static int trace_panic_handler(struct notifier_block *this,
9720 unsigned long event, void *unused)
9722 if (ftrace_dump_on_oops)
9723 ftrace_dump(ftrace_dump_on_oops);
9727 static struct notifier_block trace_panic_notifier = {
9728 .notifier_call = trace_panic_handler,
9730 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9733 static int trace_die_handler(struct notifier_block *self,
9739 if (ftrace_dump_on_oops)
9740 ftrace_dump(ftrace_dump_on_oops);
9748 static struct notifier_block trace_die_notifier = {
9749 .notifier_call = trace_die_handler,
9754 * printk is set to a max of 1024; we really don't need it that big.
9755 * Nothing should be printing 1000 characters anyway.
9757 #define TRACE_MAX_PRINT 1000
9760 * Define here KERN_TRACE so that we have one place to modify
9761 * it if we decide to change what log level the ftrace dump should be at.
9764 #define KERN_TRACE KERN_EMERG
9767 trace_printk_seq(struct trace_seq *s)
9769 /* Probably should print a warning here. */
9770 if (s->seq.len >= TRACE_MAX_PRINT)
9771 s->seq.len = TRACE_MAX_PRINT;
9774 * More paranoid code. Although the buffer size is set to
9775 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9776 * an extra layer of protection.
9778 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9779 s->seq.len = s->seq.size - 1;
9781 /* should be zero ended, but we are paranoid. */
9782 s->buffer[s->seq.len] = 0;
9784 printk(KERN_TRACE "%s", s->buffer);
9789 void trace_init_global_iter(struct trace_iterator *iter)
9791 iter->tr = &global_trace;
9792 iter->trace = iter->tr->current_trace;
9793 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9794 iter->array_buffer = &global_trace.array_buffer;
9796 if (iter->trace && iter->trace->open)
9797 iter->trace->open(iter);
9799 /* Annotate start of buffers if we had overruns */
9800 if (ring_buffer_overruns(iter->array_buffer->buffer))
9801 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9803 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9804 if (trace_clocks[iter->tr->clock_id].in_ns)
9805 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9808 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9810 /* use static because iter can be a bit big for the stack */
9811 static struct trace_iterator iter;
9812 static atomic_t dump_running;
9813 struct trace_array *tr = &global_trace;
9814 unsigned int old_userobj;
9815 unsigned long flags;
9818 /* Only allow one dump user at a time. */
9819 if (atomic_inc_return(&dump_running) != 1) {
9820 atomic_dec(&dump_running);
9825 * Always turn off tracing when we dump.
9826 * We don't need to show trace output of what happens
9827 * between multiple crashes.
9829 * If the user does a sysrq-z, then they can re-enable
9830 * tracing with echo 1 > tracing_on.
9834 local_irq_save(flags);
9836 /* Simulate the iterator */
9837 trace_init_global_iter(&iter);
9838 /* Can not use kmalloc for iter.temp and iter.fmt */
9839 iter.temp = static_temp_buf;
9840 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9841 iter.fmt = static_fmt_buf;
9842 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9844 for_each_tracing_cpu(cpu) {
9845 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9848 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9850 /* don't look at user memory in panic mode */
9851 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9853 switch (oops_dump_mode) {
9855 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9858 iter.cpu_file = raw_smp_processor_id();
9863 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9864 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9867 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9869 /* Did function tracer already get disabled? */
9870 if (ftrace_is_dead()) {
9871 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9872 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9876 * We need to stop all tracing on all CPUs to read
9877 * the next buffer. This is a bit expensive, but is
9878 * not done often. We fill all that we can read,
9879 * and then release the locks again.
9882 while (!trace_empty(&iter)) {
9885 printk(KERN_TRACE "---------------------------------\n");
9889 trace_iterator_reset(&iter);
9890 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9892 if (trace_find_next_entry_inc(&iter) != NULL) {
9895 ret = print_trace_line(&iter);
9896 if (ret != TRACE_TYPE_NO_CONSUME)
9897 trace_consume(&iter);
9899 touch_nmi_watchdog();
9901 trace_printk_seq(&iter.seq);
9905 printk(KERN_TRACE " (ftrace buffer empty)\n");
9907 printk(KERN_TRACE "---------------------------------\n");
9910 tr->trace_flags |= old_userobj;
9912 for_each_tracing_cpu(cpu) {
9913 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9915 atomic_dec(&dump_running);
9916 local_irq_restore(flags);
9918 EXPORT_SYMBOL_GPL(ftrace_dump);
9920 #define WRITE_BUFSIZE 4096
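/*
 * trace_parse_run_command() below is the common write helper for
 * command-style tracefs files: the user buffer is split on newlines,
 * '#' comments are stripped, and @createfn is invoked once per line.
 * A hypothetical multi-line write such as
 *
 *	# this line is ignored
 *	cmd_one arg
 *	cmd_two arg
 *
 * therefore results in two createfn() calls.
 */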
9922 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9923 size_t count, loff_t *ppos,
9924 int (*createfn)(const char *))
9926 char *kbuf, *buf, *tmp;
9931 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9935 while (done < count) {
9936 size = count - done;
9938 if (size >= WRITE_BUFSIZE)
9939 size = WRITE_BUFSIZE - 1;
9941 if (copy_from_user(kbuf, buffer + done, size)) {
9948 tmp = strchr(buf, '\n');
9951 size = tmp - buf + 1;
9954 if (done + size < count) {
9957 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9958 pr_warn("Line length is too long: Should be less than %d\n",
9966 /* Remove comments */
9967 tmp = strchr(buf, '#');
9972 ret = createfn(buf);
9977 } while (done < count);
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

	/* Error unwind: release resources in the reverse order of allocation */
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

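/*
 * For reference: the same switch can be made at run time through the tracefs
 * "trace_clock" file, which also ends up in tracing_set_clock(), e.g. (from a
 * root shell, assuming tracefs is mounted at the usual place):
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 */
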
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);
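
/*
 * late_initcall_sync() runs after all plain late_initcalls, so by the time
 * late_trace_init() executes, sched_clock_stable() (see the comment in
 * tracing_set_default_clock() above) has already been determined.
 */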