1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring-buffer to count the
64 * entries inserted during the selftest, although some concurrent
65 * insertions into the ring-buffer, such as trace_printk(), could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If boot-time tracing including tracers/events via kernel cmdline
72 * is running, we do not want to run SELFTEST.
74 bool __read_mostly tracing_selftest_disabled;
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
86 /* Pipe tracepoints to printk */
87 struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
104 * To prevent the comm cache from being overwritten when no
105 * tracing is active, only save the comm when a trace event
108 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
111 * Kill all tracing for good (never come back).
112 * It is initialized to 1 but will turn to zero if the initialization
113 * of the tracer is successful. But that is the only place that sets
116 static int tracing_disabled = 1;
118 cpumask_var_t __read_mostly tracing_buffer_mask;
121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
124 * is set, then ftrace_dump is called. This will output the contents
125 * of the ftrace buffers to the console. This is very useful for
126 * capturing traces that lead to crashes and outputting them to a
129 * It is off by default, but you can enable it either by specifying
130 * "ftrace_dump_on_oops" on the kernel command line, or by setting
131 * /proc/sys/kernel/ftrace_dump_on_oops
132 * Set 1 if you want to dump buffers of all CPUs
133 * Set 2 if you want to dump the buffer of the CPU that triggered oops
136 enum ftrace_dump_mode ftrace_dump_on_oops;
138 /* When set, tracing will stop when a WARN*() is hit */
139 int __disable_trace_on_warning;
141 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
142 /* Map of enums to their values, for "eval_map" file */
143 struct trace_eval_map_head {
145 unsigned long length;
148 union trace_eval_map_item;
150 struct trace_eval_map_tail {
152 * "end" is first and points to NULL as it must be different
153 * from "mod" or "eval_string"
155 union trace_eval_map_item *next;
156 const char *end; /* points to NULL */
159 static DEFINE_MUTEX(trace_eval_mutex);
162 * The trace_eval_maps are saved in an array with two extra elements,
163 * one at the beginning, and one at the end. The beginning item contains
164 * the count of the saved maps (head.length), and the module they
165 * belong to if not built in (head.mod). The ending item contains a
166 * pointer to the next array of saved eval_map items.
168 union trace_eval_map_item {
169 struct trace_eval_map map;
170 struct trace_eval_map_head head;
171 struct trace_eval_map_tail tail;
174 static union trace_eval_map_item *trace_eval_maps;
175 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
177 int tracing_set_tracer(struct trace_array *tr, const char *buf);
178 static void ftrace_trace_userstack(struct trace_array *tr,
179 struct trace_buffer *buffer,
180 unsigned int trace_ctx);
182 #define MAX_TRACER_SIZE 100
183 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
184 static char *default_bootup_tracer;
186 static bool allocate_snapshot;
188 static int __init set_cmdline_ftrace(char *str)
190 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
191 default_bootup_tracer = bootup_tracer_buf;
192 /* We are using ftrace early, expand it */
193 ring_buffer_expanded = true;
196 __setup("ftrace=", set_cmdline_ftrace);
198 static int __init set_ftrace_dump_on_oops(char *str)
200 if (*str++ != '=' || !*str || !strcmp("1", str)) {
201 ftrace_dump_on_oops = DUMP_ALL;
205 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
206 ftrace_dump_on_oops = DUMP_ORIG;
212 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
214 static int __init stop_trace_on_warning(char *str)
216 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
217 __disable_trace_on_warning = 1;
220 __setup("traceoff_on_warning", stop_trace_on_warning);
222 static int __init boot_alloc_snapshot(char *str)
224 allocate_snapshot = true;
225 /* We also need the main ring buffer expanded */
226 ring_buffer_expanded = true;
229 __setup("alloc_snapshot", boot_alloc_snapshot);
232 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
234 static int __init set_trace_boot_options(char *str)
236 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
239 __setup("trace_options=", set_trace_boot_options);
241 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
242 static char *trace_boot_clock __initdata;
244 static int __init set_trace_boot_clock(char *str)
246 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
247 trace_boot_clock = trace_boot_clock_buf;
250 __setup("trace_clock=", set_trace_boot_clock);
252 static int __init set_tracepoint_printk(char *str)
254 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
255 tracepoint_printk = 1;
258 __setup("tp_printk", set_tracepoint_printk);
260 static int __init set_tracepoint_printk_stop(char *str)
262 tracepoint_printk_stop_on_boot = true;
265 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
267 unsigned long long ns2usecs(u64 nsec)
275 trace_process_export(struct trace_export *export,
276 struct ring_buffer_event *event, int flag)
278 struct trace_entry *entry;
279 unsigned int size = 0;
281 if (export->flags & flag) {
282 entry = ring_buffer_event_data(event);
283 size = ring_buffer_event_length(event);
284 export->write(export, entry, size);
288 static DEFINE_MUTEX(ftrace_export_lock);
290 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
292 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
293 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
294 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
296 static inline void ftrace_exports_enable(struct trace_export *export)
298 if (export->flags & TRACE_EXPORT_FUNCTION)
299 static_branch_inc(&trace_function_exports_enabled);
301 if (export->flags & TRACE_EXPORT_EVENT)
302 static_branch_inc(&trace_event_exports_enabled);
304 if (export->flags & TRACE_EXPORT_MARKER)
305 static_branch_inc(&trace_marker_exports_enabled);
308 static inline void ftrace_exports_disable(struct trace_export *export)
310 if (export->flags & TRACE_EXPORT_FUNCTION)
311 static_branch_dec(&trace_function_exports_enabled);
313 if (export->flags & TRACE_EXPORT_EVENT)
314 static_branch_dec(&trace_event_exports_enabled);
316 if (export->flags & TRACE_EXPORT_MARKER)
317 static_branch_dec(&trace_marker_exports_enabled);
320 static void ftrace_exports(struct ring_buffer_event *event, int flag)
322 struct trace_export *export;
324 preempt_disable_notrace();
326 export = rcu_dereference_raw_check(ftrace_exports_list);
328 trace_process_export(export, event, flag);
329 export = rcu_dereference_raw_check(export->next);
332 preempt_enable_notrace();
336 add_trace_export(struct trace_export **list, struct trace_export *export)
338 rcu_assign_pointer(export->next, *list);
340 * We are inserting export into the list, but another
341 * CPU might be walking that list. We need to make sure
342 * the export->next pointer is valid before another CPU sees
343 * the export pointer included in the list.
345 rcu_assign_pointer(*list, export);
349 rm_trace_export(struct trace_export **list, struct trace_export *export)
351 struct trace_export **p;
353 for (p = list; *p != NULL; p = &(*p)->next)
360 rcu_assign_pointer(*p, (*p)->next);
366 add_ftrace_export(struct trace_export **list, struct trace_export *export)
368 ftrace_exports_enable(export);
370 add_trace_export(list, export);
374 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
378 ret = rm_trace_export(list, export);
379 ftrace_exports_disable(export);
384 int register_ftrace_export(struct trace_export *export)
386 if (WARN_ON_ONCE(!export->write))
389 mutex_lock(&ftrace_export_lock);
391 add_ftrace_export(&ftrace_exports_list, export);
393 mutex_unlock(&ftrace_export_lock);
397 EXPORT_SYMBOL_GPL(register_ftrace_export);
399 int unregister_ftrace_export(struct trace_export *export)
403 mutex_lock(&ftrace_export_lock);
405 ret = rm_ftrace_export(&ftrace_exports_list, export);
407 mutex_unlock(&ftrace_export_lock);
411 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
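/*
 * Illustrative sketch (not part of the original file): a minimal consumer of
 * the trace_export API above, as a module might use it. The names
 * example_export and example_export_write are hypothetical; the callback
 * signature and the TRACE_EXPORT_* flags are assumed to match <linux/trace.h>.
 */
#if 0	/* illustrative example, not compiled */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Forward the raw trace entry somewhere (e.g. a device or the network). */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}
#endif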
413 /* trace_flags holds trace_options default values */
414 #define TRACE_DEFAULT_FLAGS \
415 (FUNCTION_DEFAULT_FLAGS | \
416 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
417 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
418 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
419 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
422 /* trace_options that are only supported by global_trace */
423 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
424 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
426 /* trace_flags that are default zero for instances */
427 #define ZEROED_TRACE_FLAGS \
428 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
431 * The global_trace is the descriptor that holds the top-level tracing
432 * buffers for the live tracing.
434 static struct trace_array global_trace = {
435 .trace_flags = TRACE_DEFAULT_FLAGS,
438 LIST_HEAD(ftrace_trace_arrays);
440 int trace_array_get(struct trace_array *this_tr)
442 struct trace_array *tr;
445 mutex_lock(&trace_types_lock);
446 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
453 mutex_unlock(&trace_types_lock);
458 static void __trace_array_put(struct trace_array *this_tr)
460 WARN_ON(!this_tr->ref);
465 * trace_array_put - Decrement the reference counter for this trace array.
466 * @this_tr : pointer to the trace array
468 * NOTE: Use this when we no longer need the trace array returned by
469 * trace_array_get_by_name(). This ensures the trace array can be later
473 void trace_array_put(struct trace_array *this_tr)
478 mutex_lock(&trace_types_lock);
479 __trace_array_put(this_tr);
480 mutex_unlock(&trace_types_lock);
482 EXPORT_SYMBOL_GPL(trace_array_put);
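/*
 * Illustrative sketch (not part of the original file): the typical pairing of
 * trace_array_get_by_name() with trace_array_put() from a module. The
 * instance name "example" and example_use_instance() are assumptions.
 */
#if 0	/* illustrative example, not compiled */
static int example_use_instance(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example");
	if (!tr)
		return -ENOMEM;

	/* ... enable events on, or write into, this instance ... */

	/* Drop the reference once we are done with the instance. */
	trace_array_put(tr);
	return 0;
}
#endif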
484 int tracing_check_open_get_tr(struct trace_array *tr)
488 ret = security_locked_down(LOCKDOWN_TRACEFS);
492 if (tracing_disabled)
495 if (tr && trace_array_get(tr) < 0)
501 int call_filter_check_discard(struct trace_event_call *call, void *rec,
502 struct trace_buffer *buffer,
503 struct ring_buffer_event *event)
505 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
506 !filter_match_preds(call->filter, rec)) {
507 __trace_event_discard_commit(buffer, event);
514 void trace_free_pid_list(struct trace_pid_list *pid_list)
516 vfree(pid_list->pids);
521 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
522 * @filtered_pids: The list of pids to check
523 * @search_pid: The PID to find in @filtered_pids
525 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
528 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
531 * If pid_max changed after filtered_pids was created, we
532 * by default ignore all pids greater than the previous pid_max.
534 if (search_pid >= filtered_pids->pid_max)
537 return test_bit(search_pid, filtered_pids->pids);
541 * trace_ignore_this_task - should a task be ignored for tracing
542 * @filtered_pids: The list of pids to check
543 * @filtered_no_pids: The list of pids not to be traced
544 * @task: The task that should be ignored if not filtered
546 * Checks if @task should be traced or not from @filtered_pids.
547 * Returns true if @task should *NOT* be traced.
548 * Returns false if @task should be traced.
551 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
552 struct trace_pid_list *filtered_no_pids,
553 struct task_struct *task)
556 * If filtered_no_pids is not empty, and the task's pid is listed
557 * in filtered_no_pids, then return true.
558 * Otherwise, if filtered_pids is empty, that means we can
559 * trace all tasks. If it has content, then only trace pids
560 * within filtered_pids.
563 return (filtered_pids &&
564 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
566 trace_find_filtered_pid(filtered_no_pids, task->pid));
570 * trace_filter_add_remove_task - Add or remove a task from a pid_list
571 * @pid_list: The list to modify
572 * @self: The current task for fork or NULL for exit
573 * @task: The task to add or remove
575 * If adding a task, if @self is defined, the task is only added if @self
576 * is also included in @pid_list. This happens on fork and tasks should
577 * only be added when the parent is listed. If @self is NULL, then the
578 * @task pid will be removed from the list, which would happen on exit
581 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
582 struct task_struct *self,
583 struct task_struct *task)
588 /* For forks, we only add if the forking task is listed */
590 if (!trace_find_filtered_pid(pid_list, self->pid))
594 /* Sorry, but we don't support pid_max changing after setting */
595 if (task->pid >= pid_list->pid_max)
598 /* "self" is set for forks, and NULL for exits */
600 set_bit(task->pid, pid_list->pids);
602 clear_bit(task->pid, pid_list->pids);
606 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
607 * @pid_list: The pid list to show
608 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
609 * @pos: The position of the file
611 * This is used by the seq_file "next" operation to iterate the pids
612 * listed in a trace_pid_list structure.
614 * Returns the pid+1 as we want to display pid of zero, but NULL would
615 * stop the iteration.
617 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
619 unsigned long pid = (unsigned long)v;
623 /* pid already is +1 of the actual previous bit */
624 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
626 /* Return pid + 1 to allow zero to be represented */
627 if (pid < pid_list->pid_max)
628 return (void *)(pid + 1);
634 * trace_pid_start - Used for seq_file to start reading pid lists
635 * @pid_list: The pid list to show
636 * @pos: The position of the file
638 * This is used by seq_file "start" operation to start the iteration
641 * Returns the pid+1 as we want to display pid of zero, but NULL would
642 * stop the iteration.
644 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
649 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
650 if (pid >= pid_list->pid_max)
653 /* Return pid + 1 so that zero can be the exit value */
654 for (pid++; pid && l < *pos;
655 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
661 * trace_pid_show - show the current pid in seq_file processing
662 * @m: The seq_file structure to write into
663 * @v: A void pointer of the pid (+1) value to display
665 * Can be directly used by seq_file operations to display the current
668 int trace_pid_show(struct seq_file *m, void *v)
670 unsigned long pid = (unsigned long)v - 1;
672 seq_printf(m, "%lu\n", pid);
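/*
 * Illustrative sketch (not part of the original file): wiring the pid-list
 * helpers above into seq_file operations for a hypothetical tracefs file.
 * example_pid_list and the example_pids_* names are assumptions.
 */
#if 0	/* illustrative example, not compiled */
static struct trace_pid_list *example_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
	/* Nothing to release for this simple reader. */
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};
#endif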
676 /* 128 should be much more than enough */
677 #define PID_BUF_SIZE 127
679 int trace_pid_write(struct trace_pid_list *filtered_pids,
680 struct trace_pid_list **new_pid_list,
681 const char __user *ubuf, size_t cnt)
683 struct trace_pid_list *pid_list;
684 struct trace_parser parser;
692 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
696 * Always recreate a new array: the write is an all-or-nothing
697 * operation, so a new array is created whenever the user adds new
698 * pids. If the operation fails, then the current list is
701 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
703 trace_parser_put(&parser);
707 pid_list->pid_max = READ_ONCE(pid_max);
709 /* Only truncating will shrink pid_max */
710 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
711 pid_list->pid_max = filtered_pids->pid_max;
713 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
714 if (!pid_list->pids) {
715 trace_parser_put(&parser);
721 /* copy the current bits to the new max */
722 for_each_set_bit(pid, filtered_pids->pids,
723 filtered_pids->pid_max) {
724 set_bit(pid, pid_list->pids);
733 ret = trace_get_user(&parser, ubuf, cnt, &pos);
734 if (ret < 0 || !trace_parser_loaded(&parser))
742 if (kstrtoul(parser.buffer, 0, &val))
744 if (val >= pid_list->pid_max)
749 set_bit(pid, pid_list->pids);
752 trace_parser_clear(&parser);
755 trace_parser_put(&parser);
758 trace_free_pid_list(pid_list);
763 /* Cleared the list of pids */
764 trace_free_pid_list(pid_list);
769 *new_pid_list = pid_list;
774 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
778 /* Early boot up does not have a buffer yet */
780 return trace_clock_local();
782 ts = ring_buffer_time_stamp(buf->buffer);
783 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
788 u64 ftrace_now(int cpu)
790 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
794 * tracing_is_enabled - Show if global_trace has been enabled
796 * Shows whether the global trace has been enabled or not. It uses the
797 * mirror flag "buffer_disabled" so it can be used in fast paths, such as
798 * by the irqsoff tracer, but it may be inaccurate due to races. If you
799 * need to know the accurate state, use tracing_is_on(), which is a little
800 * slower but accurate.
802 int tracing_is_enabled(void)
805 * For quick access (irqsoff uses this in fast path), just
806 * return the mirror variable of the state of the ring buffer.
807 * It's a little racy, but we don't really care.
810 return !global_trace.buffer_disabled;
814 * trace_buf_size is the size in bytes that is allocated
815 * for a buffer. Note, the number of bytes is always rounded
818 * This number is purposely set to a low number of 16384.
819 * If the dump on oops happens, it will be much appreciated
820 * not to have to wait for all that output. Anyway, this is
821 * configurable at both boot time and run time.
823 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
825 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
827 /* trace_types holds a link list of available tracers. */
828 static struct tracer *trace_types __read_mostly;
831 * trace_types_lock is used to protect the trace_types list.
833 DEFINE_MUTEX(trace_types_lock);
836 * serialize the access of the ring buffer
838 * The ring buffer serializes readers, but that is only low-level protection.
839 * The validity of the events (returned by ring_buffer_peek(), etc.)
840 * is not protected by the ring buffer.
842 * The content of events may become garbage if we allow other processes to
843 * consume these events concurrently:
844 * A) the page of the consumed events may become a normal page
845 * (not a reader page) in the ring buffer, and this page will be rewritten
846 * by the event producer.
847 * B) the page of the consumed events may become a page for splice_read,
848 * and this page will be returned to the system.
850 * These primitives allow multi-process access to different per-CPU ring buffers
853 * These primitives don't distinguish read-only and read-consume access.
854 * Multiple read-only accesses are also serialized.
858 static DECLARE_RWSEM(all_cpu_access_lock);
859 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
861 static inline void trace_access_lock(int cpu)
863 if (cpu == RING_BUFFER_ALL_CPUS) {
864 /* gain it for accessing the whole ring buffer. */
865 down_write(&all_cpu_access_lock);
867 /* gain it for accessing a cpu ring buffer. */
869 /* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
870 down_read(&all_cpu_access_lock);
872 /* Second, block other access to this @cpu ring buffer. */
873 mutex_lock(&per_cpu(cpu_access_lock, cpu));
877 static inline void trace_access_unlock(int cpu)
879 if (cpu == RING_BUFFER_ALL_CPUS) {
880 up_write(&all_cpu_access_lock);
882 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
883 up_read(&all_cpu_access_lock);
887 static inline void trace_access_lock_init(void)
891 for_each_possible_cpu(cpu)
892 mutex_init(&per_cpu(cpu_access_lock, cpu));
897 static DEFINE_MUTEX(access_lock);
899 static inline void trace_access_lock(int cpu)
902 mutex_lock(&access_lock);
905 static inline void trace_access_unlock(int cpu)
908 mutex_unlock(&access_lock);
911 static inline void trace_access_lock_init(void)
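/*
 * Illustrative sketch (not part of the original file): the intended usage
 * pattern for the locking helpers above when consuming events from a single
 * CPU buffer. example_consume_cpu() is a hypothetical reader.
 */
#if 0	/* illustrative example, not compiled */
static void example_consume_cpu(struct trace_iterator *iter, int cpu)
{
	trace_access_lock(cpu);
	/*
	 * ... consume events from the @cpu ring buffer (e.g. via
	 * ring_buffer_consume()) while other readers are held off ...
	 */
	trace_access_unlock(cpu);
}
#endif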
917 #ifdef CONFIG_STACKTRACE
918 static void __ftrace_trace_stack(struct trace_buffer *buffer,
919 unsigned int trace_ctx,
920 int skip, struct pt_regs *regs);
921 static inline void ftrace_trace_stack(struct trace_array *tr,
922 struct trace_buffer *buffer,
923 unsigned int trace_ctx,
924 int skip, struct pt_regs *regs);
927 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
928 unsigned int trace_ctx,
929 int skip, struct pt_regs *regs)
932 static inline void ftrace_trace_stack(struct trace_array *tr,
933 struct trace_buffer *buffer,
934 unsigned long trace_ctx,
935 int skip, struct pt_regs *regs)
941 static __always_inline void
942 trace_event_setup(struct ring_buffer_event *event,
943 int type, unsigned int trace_ctx)
945 struct trace_entry *ent = ring_buffer_event_data(event);
947 tracing_generic_entry_update(ent, type, trace_ctx);
950 static __always_inline struct ring_buffer_event *
951 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
954 unsigned int trace_ctx)
956 struct ring_buffer_event *event;
958 event = ring_buffer_lock_reserve(buffer, len);
960 trace_event_setup(event, type, trace_ctx);
965 void tracer_tracing_on(struct trace_array *tr)
967 if (tr->array_buffer.buffer)
968 ring_buffer_record_on(tr->array_buffer.buffer);
970 * This flag is looked at when buffers haven't been allocated
971 * yet, or by some tracers (like irqsoff) that just want to
972 * know if the ring buffer has been disabled, but it can handle
973 * races where it gets disabled but we still do a record.
974 * As the check is in the fast path of the tracers, it is more
975 * important to be fast than accurate.
977 tr->buffer_disabled = 0;
978 /* Make the flag seen by readers */
983 * tracing_on - enable tracing buffers
985 * This function enables tracing buffers that may have been
986 * disabled with tracing_off.
988 void tracing_on(void)
990 tracer_tracing_on(&global_trace);
992 EXPORT_SYMBOL_GPL(tracing_on);
995 static __always_inline void
996 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
998 __this_cpu_write(trace_taskinfo_save, true);
1000 /* If this is the temp buffer, we need to commit fully */
1001 if (this_cpu_read(trace_buffered_event) == event) {
1002 /* Length is in event->array[0] */
1003 ring_buffer_write(buffer, event->array[0], &event->array[1]);
1004 /* Release the temp buffer */
1005 this_cpu_dec(trace_buffered_event_cnt);
1007 ring_buffer_unlock_commit(buffer, event);
1011 * __trace_puts - write a constant string into the trace buffer.
1012 * @ip: The address of the caller
1013 * @str: The constant string to write
1014 * @size: The size of the string.
1016 int __trace_puts(unsigned long ip, const char *str, int size)
1018 struct ring_buffer_event *event;
1019 struct trace_buffer *buffer;
1020 struct print_entry *entry;
1021 unsigned int trace_ctx;
1024 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1027 if (unlikely(tracing_selftest_running || tracing_disabled))
1030 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1032 trace_ctx = tracing_gen_ctx();
1033 buffer = global_trace.array_buffer.buffer;
1034 ring_buffer_nest_start(buffer);
1035 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1042 entry = ring_buffer_event_data(event);
1045 memcpy(&entry->buf, str, size);
1047 /* Add a newline if necessary */
1048 if (entry->buf[size - 1] != '\n') {
1049 entry->buf[size] = '\n';
1050 entry->buf[size + 1] = '\0';
1052 entry->buf[size] = '\0';
1054 __buffer_unlock_commit(buffer, event);
1055 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1057 ring_buffer_nest_end(buffer);
1060 EXPORT_SYMBOL_GPL(__trace_puts);
1063 * __trace_bputs - write the pointer to a constant string into trace buffer
1064 * @ip: The address of the caller
1065 * @str: The constant string to write to the buffer to
1067 int __trace_bputs(unsigned long ip, const char *str)
1069 struct ring_buffer_event *event;
1070 struct trace_buffer *buffer;
1071 struct bputs_entry *entry;
1072 unsigned int trace_ctx;
1073 int size = sizeof(struct bputs_entry);
1076 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1079 if (unlikely(tracing_selftest_running || tracing_disabled))
1082 trace_ctx = tracing_gen_ctx();
1083 buffer = global_trace.array_buffer.buffer;
1085 ring_buffer_nest_start(buffer);
1086 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1091 entry = ring_buffer_event_data(event);
1095 __buffer_unlock_commit(buffer, event);
1096 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1100 ring_buffer_nest_end(buffer);
1103 EXPORT_SYMBOL_GPL(__trace_bputs);
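/*
 * Illustrative sketch (not part of the original file): __trace_puts() and
 * __trace_bputs() are normally reached through the trace_puts() macro from
 * <linux/kernel.h>, which picks the cheaper __trace_bputs() when the string
 * is a built-in constant. example_mark_point() is a hypothetical caller.
 */
#if 0	/* illustrative example, not compiled */
static void example_mark_point(void)
{
	/* Writes a constant string into the trace buffer at this point. */
	trace_puts("example: reached the interesting code path\n");
}
#endif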
1105 #ifdef CONFIG_TRACER_SNAPSHOT
1106 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1109 struct tracer *tracer = tr->current_trace;
1110 unsigned long flags;
1113 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1114 internal_trace_puts("*** snapshot is being ignored ***\n");
1118 if (!tr->allocated_snapshot) {
1119 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1120 internal_trace_puts("*** stopping trace here! ***\n");
1125 /* Note, snapshot can not be used when the tracer uses it */
1126 if (tracer->use_max_tr) {
1127 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1128 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1132 local_irq_save(flags);
1133 update_max_tr(tr, current, smp_processor_id(), cond_data);
1134 local_irq_restore(flags);
1137 void tracing_snapshot_instance(struct trace_array *tr)
1139 tracing_snapshot_instance_cond(tr, NULL);
1143 * tracing_snapshot - take a snapshot of the current buffer.
1145 * This causes a swap between the snapshot buffer and the current live
1146 * tracing buffer. You can use this to take snapshots of the live
1147 * trace when some condition is triggered, but continue to trace.
1149 * Note, make sure to allocate the snapshot either with
1150 * tracing_snapshot_alloc(), or by doing it manually
1151 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1153 * If the snapshot buffer is not allocated, it will stop tracing.
1154 * Basically making a permanent snapshot.
1156 void tracing_snapshot(void)
1158 struct trace_array *tr = &global_trace;
1160 tracing_snapshot_instance(tr);
1162 EXPORT_SYMBOL_GPL(tracing_snapshot);
1165 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1166 * @tr: The tracing instance to snapshot
1167 * @cond_data: The data to be tested conditionally, and possibly saved
1169 * This is the same as tracing_snapshot() except that the snapshot is
1170 * conditional - the snapshot will only happen if the
1171 * cond_snapshot.update() implementation receiving the cond_data
1172 * returns true, which means that the trace array's cond_snapshot
1173 * update() operation used the cond_data to determine whether the
1174 * snapshot should be taken, and if it was, presumably saved it along
1175 * with the snapshot.
1177 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1179 tracing_snapshot_instance_cond(tr, cond_data);
1181 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1184 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1185 * @tr: The tracing instance
1187 * When the user enables a conditional snapshot using
1188 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1189 * with the snapshot. This accessor is used to retrieve it.
1191 * Should not be called from cond_snapshot.update(), since it takes
1192 * the tr->max_lock lock, which the code calling
1193 * cond_snapshot.update() has already done.
1195 * Returns the cond_data associated with the trace array's snapshot.
1197 void *tracing_cond_snapshot_data(struct trace_array *tr)
1199 void *cond_data = NULL;
1201 arch_spin_lock(&tr->max_lock);
1203 if (tr->cond_snapshot)
1204 cond_data = tr->cond_snapshot->cond_data;
1206 arch_spin_unlock(&tr->max_lock);
1210 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1212 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1213 struct array_buffer *size_buf, int cpu_id);
1214 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1216 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1220 if (!tr->allocated_snapshot) {
1222 /* allocate spare buffer */
1223 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1224 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1228 tr->allocated_snapshot = true;
1234 static void free_snapshot(struct trace_array *tr)
1237 * We don't free the ring buffer; instead, we resize it because
1238 * the max_tr ring buffer has some state (e.g. ring->clock) and
1239 * we want to preserve it.
1241 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1242 set_buffer_entries(&tr->max_buffer, 1);
1243 tracing_reset_online_cpus(&tr->max_buffer);
1244 tr->allocated_snapshot = false;
1248 * tracing_alloc_snapshot - allocate snapshot buffer.
1250 * This only allocates the snapshot buffer if it isn't already
1251 * allocated - it doesn't also take a snapshot.
1253 * This is meant to be used in cases where the snapshot buffer needs
1254 * to be set up for events that can't sleep but need to be able to
1255 * trigger a snapshot.
1257 int tracing_alloc_snapshot(void)
1259 struct trace_array *tr = &global_trace;
1262 ret = tracing_alloc_snapshot_instance(tr);
1267 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1270 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1272 * This is similar to tracing_snapshot(), but it will allocate the
1273 * snapshot buffer if it isn't already allocated. Use this only
1274 * where it is safe to sleep, as the allocation may sleep.
1276 * This causes a swap between the snapshot buffer and the current live
1277 * tracing buffer. You can use this to take snapshots of the live
1278 * trace when some condition is triggered, but continue to trace.
1280 void tracing_snapshot_alloc(void)
1284 ret = tracing_alloc_snapshot();
1290 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
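/*
 * Illustrative sketch (not part of the original file): taking a snapshot
 * around a condition. tracing_snapshot_alloc() may sleep, so the allocation
 * is done up front and the cheap tracing_snapshot() is used at the trigger
 * point. example_setup() and example_trigger() are hypothetical.
 */
#if 0	/* illustrative example, not compiled */
static void example_setup(void)
{
	/* Safe to sleep here; allocates the spare (snapshot) buffer. */
	tracing_snapshot_alloc();
}

static void example_trigger(void)
{
	/* Swap the live buffer with the snapshot buffer, keep tracing. */
	tracing_snapshot();
}
#endif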
1293 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1294 * @tr: The tracing instance
1295 * @cond_data: User data to associate with the snapshot
1296 * @update: Implementation of the cond_snapshot update function
1298 * Check whether the conditional snapshot for the given instance has
1299 * already been enabled, or if the current tracer is already using a
1300 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1301 * save the cond_data and update function inside.
1303 * Returns 0 if successful, error otherwise.
1305 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1306 cond_update_fn_t update)
1308 struct cond_snapshot *cond_snapshot;
1311 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1315 cond_snapshot->cond_data = cond_data;
1316 cond_snapshot->update = update;
1318 mutex_lock(&trace_types_lock);
1320 ret = tracing_alloc_snapshot_instance(tr);
1324 if (tr->current_trace->use_max_tr) {
1330 * The cond_snapshot can only change to NULL without the
1331 * trace_types_lock. We don't care if we race with it going
1332 * to NULL, but we want to make sure that it's not set to
1333 * something other than NULL when we get here, which we can
1334 * do safely with only holding the trace_types_lock and not
1335 * having to take the max_lock.
1337 if (tr->cond_snapshot) {
1342 arch_spin_lock(&tr->max_lock);
1343 tr->cond_snapshot = cond_snapshot;
1344 arch_spin_unlock(&tr->max_lock);
1346 mutex_unlock(&trace_types_lock);
1351 mutex_unlock(&trace_types_lock);
1352 kfree(cond_snapshot);
1355 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1358 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1359 * @tr: The tracing instance
1361 * Check whether the conditional snapshot for the given instance is
1362 * enabled; if so, free the cond_snapshot associated with it,
1363 * otherwise return -EINVAL.
1365 * Returns 0 if successful, error otherwise.
1367 int tracing_snapshot_cond_disable(struct trace_array *tr)
1371 arch_spin_lock(&tr->max_lock);
1373 if (!tr->cond_snapshot)
1376 kfree(tr->cond_snapshot);
1377 tr->cond_snapshot = NULL;
1380 arch_spin_unlock(&tr->max_lock);
1384 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
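/*
 * Illustrative sketch (not part of the original file): a conditional
 * snapshot user. The update() callback decides whether the snapshot is
 * actually taken; struct example_cond and the example_* names are
 * assumptions, and the callback signature is assumed to match
 * cond_update_fn_t from <linux/trace.h>.
 */
#if 0	/* illustrative example, not compiled */
struct example_cond {
	unsigned long	hits;
	unsigned long	limit;
};

static bool example_update(struct trace_array *tr, void *cond_data)
{
	struct example_cond *cond = cond_data;

	/* Only take the snapshot every cond->limit trigger calls. */
	return (++cond->hits % cond->limit) == 0;
}

static struct example_cond example_cond = { .limit = 100 };

static int example_enable(struct trace_array *tr)
{
	return tracing_snapshot_cond_enable(tr, &example_cond, example_update);
}

static void example_trigger(struct trace_array *tr)
{
	/* The cond_data passed here is what example_update() receives. */
	tracing_snapshot_cond(tr, &example_cond);
}
#endif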
1386 void tracing_snapshot(void)
1388 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1390 EXPORT_SYMBOL_GPL(tracing_snapshot);
1391 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1393 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1395 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1396 int tracing_alloc_snapshot(void)
1398 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1401 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1402 void tracing_snapshot_alloc(void)
1407 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1408 void *tracing_cond_snapshot_data(struct trace_array *tr)
1412 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1413 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1417 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1418 int tracing_snapshot_cond_disable(struct trace_array *tr)
1422 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1423 #endif /* CONFIG_TRACER_SNAPSHOT */
1425 void tracer_tracing_off(struct trace_array *tr)
1427 if (tr->array_buffer.buffer)
1428 ring_buffer_record_off(tr->array_buffer.buffer);
1430 * This flag is looked at when buffers haven't been allocated
1431 * yet, or by some tracers (like irqsoff) that just want to
1432 * know if the ring buffer has been disabled, but it can handle
1433 * races where it gets disabled but we still do a record.
1434 * As the check is in the fast path of the tracers, it is more
1435 * important to be fast than accurate.
1437 tr->buffer_disabled = 1;
1438 /* Make the flag seen by readers */
1443 * tracing_off - turn off tracing buffers
1445 * This function stops the tracing buffers from recording data.
1446 * It does not disable any overhead the tracers themselves may
1447 * be causing. This function simply causes all recording to
1448 * the ring buffers to fail.
1450 void tracing_off(void)
1452 tracer_tracing_off(&global_trace);
1454 EXPORT_SYMBOL_GPL(tracing_off);
1456 void disable_trace_on_warning(void)
1458 if (__disable_trace_on_warning) {
1459 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1460 "Disabling tracing due to warning\n");
1466 * tracer_tracing_is_on - show real state of ring buffer enabled
1467 * @tr : the trace array to know if ring buffer is enabled
1469 * Shows real state of the ring buffer if it is enabled or not.
1471 bool tracer_tracing_is_on(struct trace_array *tr)
1473 if (tr->array_buffer.buffer)
1474 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1475 return !tr->buffer_disabled;
1479 * tracing_is_on - show state of ring buffers enabled
1481 int tracing_is_on(void)
1483 return tracer_tracing_is_on(&global_trace);
1485 EXPORT_SYMBOL_GPL(tracing_is_on);
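/*
 * Illustrative sketch (not part of the original file): freezing the ring
 * buffers when a suspicious state is detected so the trace leading up to it
 * is preserved. example_problem_detected and example_check() are hypothetical.
 */
#if 0	/* illustrative example, not compiled */
static bool example_problem_detected;

static void example_check(void)
{
	if (READ_ONCE(example_problem_detected) && tracing_is_on()) {
		trace_puts("example: problem detected, stopping trace\n");
		tracing_off();
	}
}
#endif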
1487 static int __init set_buf_size(char *str)
1489 unsigned long buf_size;
1493 buf_size = memparse(str, &str);
1494 /* nr_entries can not be zero */
1497 trace_buf_size = buf_size;
1500 __setup("trace_buf_size=", set_buf_size);
1502 static int __init set_tracing_thresh(char *str)
1504 unsigned long threshold;
1509 ret = kstrtoul(str, 0, &threshold);
1512 tracing_thresh = threshold * 1000;
1515 __setup("tracing_thresh=", set_tracing_thresh);
1517 unsigned long nsecs_to_usecs(unsigned long nsecs)
1519 return nsecs / 1000;
1523 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1524 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1525 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1526 * of strings in the order that the evals (enum) were defined.
1531 /* These must match the bit positions in trace_iterator_flags */
1532 static const char *trace_options[] = {
1540 int in_ns; /* is this clock in nanoseconds? */
1541 } trace_clocks[] = {
1542 { trace_clock_local, "local", 1 },
1543 { trace_clock_global, "global", 1 },
1544 { trace_clock_counter, "counter", 0 },
1545 { trace_clock_jiffies, "uptime", 0 },
1546 { trace_clock, "perf", 1 },
1547 { ktime_get_mono_fast_ns, "mono", 1 },
1548 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1549 { ktime_get_boot_fast_ns, "boot", 1 },
1553 bool trace_clock_in_ns(struct trace_array *tr)
1555 if (trace_clocks[tr->clock_id].in_ns)
1562 * trace_parser_get_init - gets the buffer for trace parser
1564 int trace_parser_get_init(struct trace_parser *parser, int size)
1566 memset(parser, 0, sizeof(*parser));
1568 parser->buffer = kmalloc(size, GFP_KERNEL);
1569 if (!parser->buffer)
1572 parser->size = size;
1577 * trace_parser_put - frees the buffer for trace parser
1579 void trace_parser_put(struct trace_parser *parser)
1581 kfree(parser->buffer);
1582 parser->buffer = NULL;
1586 * trace_get_user - reads the user input string separated by space
1587 * (matched by isspace(ch))
1589 * For each string found the 'struct trace_parser' is updated,
1590 * and the function returns.
1592 * Returns number of bytes read.
1594 * See kernel/trace/trace.h for 'struct trace_parser' details.
1596 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1597 size_t cnt, loff_t *ppos)
1604 trace_parser_clear(parser);
1606 ret = get_user(ch, ubuf++);
1614 * The parser is not finished with the last write,
1615 * continue reading the user input without skipping spaces.
1617 if (!parser->cont) {
1618 /* skip white space */
1619 while (cnt && isspace(ch)) {
1620 ret = get_user(ch, ubuf++);
1629 /* only spaces were written */
1630 if (isspace(ch) || !ch) {
1637 /* read the non-space input */
1638 while (cnt && !isspace(ch) && ch) {
1639 if (parser->idx < parser->size - 1)
1640 parser->buffer[parser->idx++] = ch;
1645 ret = get_user(ch, ubuf++);
1652 /* We either got finished input or we have to wait for another call. */
1653 if (isspace(ch) || !ch) {
1654 parser->buffer[parser->idx] = 0;
1655 parser->cont = false;
1656 } else if (parser->idx < parser->size - 1) {
1657 parser->cont = true;
1658 parser->buffer[parser->idx++] = ch;
1659 /* Make sure the parsed string always terminates with '\0'. */
1660 parser->buffer[parser->idx] = 0;
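/*
 * Illustrative sketch (not part of the original file): how a tracefs write
 * handler typically drives the parser above; compare trace_pid_write()
 * earlier in this file. example_write() is a hypothetical file operation.
 */
#if 0	/* illustrative example, not compiled */
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && trace_parser_loaded(&parser))
		pr_info("example: parsed token '%s'\n", parser.buffer);

	trace_parser_put(&parser);
	return read;
}
#endif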
1673 /* TODO add a seq_buf_to_buffer() */
1674 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1678 if (trace_seq_used(s) <= s->seq.readpos)
1681 len = trace_seq_used(s) - s->seq.readpos;
1684 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1686 s->seq.readpos += cnt;
1690 unsigned long __read_mostly tracing_thresh;
1691 static const struct file_operations tracing_max_lat_fops;
1693 #ifdef LATENCY_FS_NOTIFY
1695 static struct workqueue_struct *fsnotify_wq;
1697 static void latency_fsnotify_workfn(struct work_struct *work)
1699 struct trace_array *tr = container_of(work, struct trace_array,
1701 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1704 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1706 struct trace_array *tr = container_of(iwork, struct trace_array,
1708 queue_work(fsnotify_wq, &tr->fsnotify_work);
1711 static void trace_create_maxlat_file(struct trace_array *tr,
1712 struct dentry *d_tracer)
1714 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1715 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1716 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1717 d_tracer, &tr->max_latency,
1718 &tracing_max_lat_fops);
1721 __init static int latency_fsnotify_init(void)
1723 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1724 WQ_UNBOUND | WQ_HIGHPRI, 0);
1726 pr_err("Unable to allocate tr_max_lat_wq\n");
1732 late_initcall_sync(latency_fsnotify_init);
1734 void latency_fsnotify(struct trace_array *tr)
1739 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1740 * possible that we are called from __schedule() or do_idle(), which
1741 * could cause a deadlock.
1743 irq_work_queue(&tr->fsnotify_irqwork);
1747 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1748 * defined(CONFIG_FSNOTIFY)
1752 #define trace_create_maxlat_file(tr, d_tracer) \
1753 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1754 &tr->max_latency, &tracing_max_lat_fops)
1758 #ifdef CONFIG_TRACER_MAX_TRACE
1760 * Copy the new maximum trace into the separate maximum-trace
1761 * structure. (this way the maximum trace is permanently saved,
1762 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1765 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1767 struct array_buffer *trace_buf = &tr->array_buffer;
1768 struct array_buffer *max_buf = &tr->max_buffer;
1769 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1770 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1773 max_buf->time_start = data->preempt_timestamp;
1775 max_data->saved_latency = tr->max_latency;
1776 max_data->critical_start = data->critical_start;
1777 max_data->critical_end = data->critical_end;
1779 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1780 max_data->pid = tsk->pid;
1782 * If tsk == current, then use current_uid(), as that does not use
1783 * RCU. The irq tracer can be called out of RCU scope.
1786 max_data->uid = current_uid();
1788 max_data->uid = task_uid(tsk);
1790 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1791 max_data->policy = tsk->policy;
1792 max_data->rt_priority = tsk->rt_priority;
1794 /* record this tasks comm */
1795 tracing_record_cmdline(tsk);
1796 latency_fsnotify(tr);
1800 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1802 * @tsk: the task with the latency
1803 * @cpu: The cpu that initiated the trace.
1804 * @cond_data: User data associated with a conditional snapshot
1806 * Flip the buffers between the @tr and the max_tr and record information
1807 * about which task was the cause of this latency.
1810 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1816 WARN_ON_ONCE(!irqs_disabled());
1818 if (!tr->allocated_snapshot) {
1819 /* Only the nop tracer should hit this when disabling */
1820 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1824 arch_spin_lock(&tr->max_lock);
1826 /* Inherit the recordable setting from array_buffer */
1827 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1828 ring_buffer_record_on(tr->max_buffer.buffer);
1830 ring_buffer_record_off(tr->max_buffer.buffer);
1832 #ifdef CONFIG_TRACER_SNAPSHOT
1833 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1836 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1838 __update_max_tr(tr, tsk, cpu);
1841 arch_spin_unlock(&tr->max_lock);
1845 * update_max_tr_single - only copy one trace over, and reset the rest
1847 * @tsk: task with the latency
1848 * @cpu: the cpu of the buffer to copy.
1850 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1853 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1860 WARN_ON_ONCE(!irqs_disabled());
1861 if (!tr->allocated_snapshot) {
1862 /* Only the nop tracer should hit this when disabling */
1863 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1867 arch_spin_lock(&tr->max_lock);
1869 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1871 if (ret == -EBUSY) {
1873 * We failed to swap the buffer due to a commit taking
1874 * place on this CPU. We fail to record, but we reset
1875 * the max trace buffer (no one writes directly to it)
1876 * and flag that it failed.
1878 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1879 "Failed to swap buffers due to commit in progress\n");
1882 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1884 __update_max_tr(tr, tsk, cpu);
1885 arch_spin_unlock(&tr->max_lock);
1887 #endif /* CONFIG_TRACER_MAX_TRACE */
1889 static int wait_on_pipe(struct trace_iterator *iter, int full)
1891 /* Iterators are static, they should be filled or empty */
1892 if (trace_buffer_iter(iter, iter->cpu_file))
1895 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1899 #ifdef CONFIG_FTRACE_STARTUP_TEST
1900 static bool selftests_can_run;
1902 struct trace_selftests {
1903 struct list_head list;
1904 struct tracer *type;
1907 static LIST_HEAD(postponed_selftests);
1909 static int save_selftest(struct tracer *type)
1911 struct trace_selftests *selftest;
1913 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1917 selftest->type = type;
1918 list_add(&selftest->list, &postponed_selftests);
1922 static int run_tracer_selftest(struct tracer *type)
1924 struct trace_array *tr = &global_trace;
1925 struct tracer *saved_tracer = tr->current_trace;
1928 if (!type->selftest || tracing_selftest_disabled)
1932 * If a tracer registers early in boot up (before scheduling is
1933 * initialized and such), then do not run its selftests yet.
1934 * Instead, run them a little later in the boot process.
1936 if (!selftests_can_run)
1937 return save_selftest(type);
1939 if (!tracing_is_on()) {
1940 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1946 * Run a selftest on this tracer.
1947 * Here we reset the trace buffer, and set the current
1948 * tracer to be this tracer. The tracer can then run some
1949 * internal tracing to verify that everything is in order.
1950 * If we fail, we do not register this tracer.
1952 tracing_reset_online_cpus(&tr->array_buffer);
1954 tr->current_trace = type;
1956 #ifdef CONFIG_TRACER_MAX_TRACE
1957 if (type->use_max_tr) {
1958 /* If we expanded the buffers, make sure the max is expanded too */
1959 if (ring_buffer_expanded)
1960 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1961 RING_BUFFER_ALL_CPUS);
1962 tr->allocated_snapshot = true;
1966 /* the test is responsible for initializing and enabling */
1967 pr_info("Testing tracer %s: ", type->name);
1968 ret = type->selftest(type, tr);
1969 /* the test is responsible for resetting too */
1970 tr->current_trace = saved_tracer;
1972 printk(KERN_CONT "FAILED!\n");
1973 /* Add the warning after printing 'FAILED' */
1977 /* Only reset on passing, to avoid touching corrupted buffers */
1978 tracing_reset_online_cpus(&tr->array_buffer);
1980 #ifdef CONFIG_TRACER_MAX_TRACE
1981 if (type->use_max_tr) {
1982 tr->allocated_snapshot = false;
1984 /* Shrink the max buffer again */
1985 if (ring_buffer_expanded)
1986 ring_buffer_resize(tr->max_buffer.buffer, 1,
1987 RING_BUFFER_ALL_CPUS);
1991 printk(KERN_CONT "PASSED\n");
1995 static __init int init_trace_selftests(void)
1997 struct trace_selftests *p, *n;
1998 struct tracer *t, **last;
2001 selftests_can_run = true;
2003 mutex_lock(&trace_types_lock);
2005 if (list_empty(&postponed_selftests))
2008 pr_info("Running postponed tracer tests:\n");
2010 tracing_selftest_running = true;
2011 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2012 /* This loop can take minutes when sanitizers are enabled, so
2013 * let's make sure we allow RCU processing.
2016 ret = run_tracer_selftest(p->type);
2017 /* If the test fails, then warn and remove from available_tracers */
2019 WARN(1, "tracer: %s failed selftest, disabling\n",
2021 last = &trace_types;
2022 for (t = trace_types; t; t = t->next) {
2033 tracing_selftest_running = false;
2036 mutex_unlock(&trace_types_lock);
2040 core_initcall(init_trace_selftests);
2042 static inline int run_tracer_selftest(struct tracer *type)
2046 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2048 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2050 static void __init apply_trace_boot_options(void);
2053 * register_tracer - register a tracer with the ftrace system.
2054 * @type: the plugin for the tracer
2056 * Register a new plugin tracer.
2058 int __init register_tracer(struct tracer *type)
2064 pr_info("Tracer must have a name\n");
2068 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2069 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2073 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2074 pr_warn("Can not register tracer %s due to lockdown\n",
2079 mutex_lock(&trace_types_lock);
2081 tracing_selftest_running = true;
2083 for (t = trace_types; t; t = t->next) {
2084 if (strcmp(type->name, t->name) == 0) {
2086 pr_info("Tracer %s already registered\n",
2093 if (!type->set_flag)
2094 type->set_flag = &dummy_set_flag;
2096 /* allocate a dummy tracer_flags */
2097 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2102 type->flags->val = 0;
2103 type->flags->opts = dummy_tracer_opt;
2105 if (!type->flags->opts)
2106 type->flags->opts = dummy_tracer_opt;
2108 /* store the tracer for __set_tracer_option */
2109 type->flags->trace = type;
2111 ret = run_tracer_selftest(type);
2115 type->next = trace_types;
2117 add_tracer_options(&global_trace, type);
2120 tracing_selftest_running = false;
2121 mutex_unlock(&trace_types_lock);
2123 if (ret || !default_bootup_tracer)
2126 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2129 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2130 /* Do we want this tracer to start on bootup? */
2131 tracing_set_tracer(&global_trace, type->name);
2132 default_bootup_tracer = NULL;
2134 apply_trace_boot_options();
2136 /* disable other selftests, since this will break it. */
2137 disable_tracing_selftest("running a tracer");
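/*
 * Illustrative sketch (not part of the original file): the minimum a new
 * tracer plugin provides before calling register_tracer() from an __init
 * path. The "example" tracer name and the example_tracer_* callbacks are
 * assumptions; struct tracer is defined in kernel/trace/trace.h.
 */
#if 0	/* illustrative example, not compiled */
static int example_tracer_init(struct trace_array *tr)
{
	/* Enable whatever hooks or events this tracer needs. */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* Undo everything done in example_tracer_init(). */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif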
2143 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2145 struct trace_buffer *buffer = buf->buffer;
2150 ring_buffer_record_disable(buffer);
2152 /* Make sure all commits have finished */
2154 ring_buffer_reset_cpu(buffer, cpu);
2156 ring_buffer_record_enable(buffer);
2159 void tracing_reset_online_cpus(struct array_buffer *buf)
2161 struct trace_buffer *buffer = buf->buffer;
2166 ring_buffer_record_disable(buffer);
2168 /* Make sure all commits have finished */
2171 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2173 ring_buffer_reset_online_cpus(buffer);
2175 ring_buffer_record_enable(buffer);
2178 /* Must have trace_types_lock held */
2179 void tracing_reset_all_online_cpus(void)
2181 struct trace_array *tr;
2183 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2184 if (!tr->clear_trace)
2186 tr->clear_trace = false;
2187 tracing_reset_online_cpus(&tr->array_buffer);
2188 #ifdef CONFIG_TRACER_MAX_TRACE
2189 tracing_reset_online_cpus(&tr->max_buffer);
2195 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2196 * is the tgid last observed corresponding to pid=i.
2198 static int *tgid_map;
2200 /* The maximum valid index into tgid_map. */
2201 static size_t tgid_map_max;
2203 #define SAVED_CMDLINES_DEFAULT 128
2204 #define NO_CMDLINE_MAP UINT_MAX
2205 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2206 struct saved_cmdlines_buffer {
2207 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2208 unsigned *map_cmdline_to_pid;
2209 unsigned cmdline_num;
2211 char *saved_cmdlines;
2213 static struct saved_cmdlines_buffer *savedcmd;
2215 /* temporary disable recording */
2216 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2218 static inline char *get_saved_cmdlines(int idx)
2220 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2223 static inline void set_cmdline(int idx, const char *cmdline)
2225 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2228 static int allocate_cmdlines_buffer(unsigned int val,
2229 struct saved_cmdlines_buffer *s)
2231 s->map_cmdline_to_pid = kmalloc_array(val,
2232 sizeof(*s->map_cmdline_to_pid),
2234 if (!s->map_cmdline_to_pid)
2237 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2238 if (!s->saved_cmdlines) {
2239 kfree(s->map_cmdline_to_pid);
2244 s->cmdline_num = val;
2245 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2246 sizeof(s->map_pid_to_cmdline));
2247 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2248 val * sizeof(*s->map_cmdline_to_pid));
2253 static int trace_create_savedcmd(void)
2257 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2261 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2271 int is_tracing_stopped(void)
2273 return global_trace.stop_count;
2277 * tracing_start - quick start of the tracer
2279 * If tracing is enabled but was stopped by tracing_stop,
2280 * this will start the tracer back up.
2282 void tracing_start(void)
2284 struct trace_buffer *buffer;
2285 unsigned long flags;
2287 if (tracing_disabled)
2290 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2291 if (--global_trace.stop_count) {
2292 if (global_trace.stop_count < 0) {
2293 /* Someone screwed up their debugging */
2295 global_trace.stop_count = 0;
2300 /* Prevent the buffers from switching */
2301 arch_spin_lock(&global_trace.max_lock);
2303 buffer = global_trace.array_buffer.buffer;
2305 ring_buffer_record_enable(buffer);
2307 #ifdef CONFIG_TRACER_MAX_TRACE
2308 buffer = global_trace.max_buffer.buffer;
2310 ring_buffer_record_enable(buffer);
2313 arch_spin_unlock(&global_trace.max_lock);
2316 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2319 static void tracing_start_tr(struct trace_array *tr)
2321 struct trace_buffer *buffer;
2322 unsigned long flags;
2324 if (tracing_disabled)
2327 /* If global, we need to also start the max tracer */
2328 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2329 return tracing_start();
2331 raw_spin_lock_irqsave(&tr->start_lock, flags);
2333 if (--tr->stop_count) {
2334 if (tr->stop_count < 0) {
2335 /* Someone screwed up their debugging */
2342 buffer = tr->array_buffer.buffer;
2344 ring_buffer_record_enable(buffer);
2347 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2351 * tracing_stop - quick stop of the tracer
2353 * Light weight way to stop tracing. Use in conjunction with
2356 void tracing_stop(void)
2358 struct trace_buffer *buffer;
2359 unsigned long flags;
2361 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2362 if (global_trace.stop_count++)
2365 /* Prevent the buffers from switching */
2366 arch_spin_lock(&global_trace.max_lock);
2368 buffer = global_trace.array_buffer.buffer;
2370 ring_buffer_record_disable(buffer);
2372 #ifdef CONFIG_TRACER_MAX_TRACE
2373 buffer = global_trace.max_buffer.buffer;
2375 ring_buffer_record_disable(buffer);
2378 arch_spin_unlock(&global_trace.max_lock);
2381 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2384 static void tracing_stop_tr(struct trace_array *tr)
2386 struct trace_buffer *buffer;
2387 unsigned long flags;
2389 /* If global, we need to also stop the max tracer */
2390 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2391 return tracing_stop();
2393 raw_spin_lock_irqsave(&tr->start_lock, flags);
2394 if (tr->stop_count++)
2397 buffer = tr->array_buffer.buffer;
2399 ring_buffer_record_disable(buffer);
2402 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2405 static int trace_save_cmdline(struct task_struct *tsk)
2409 /* treat recording of idle task as a success */
2413 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2416 * It's not the end of the world if we don't get
2417 * the lock, but we also don't want to spin
2418 * nor do we want to disable interrupts,
2419 * so if we miss here, then better luck next time.
2421 if (!arch_spin_trylock(&trace_cmdline_lock))
2424 idx = savedcmd->map_pid_to_cmdline[tpid];
2425 if (idx == NO_CMDLINE_MAP) {
2426 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2428 savedcmd->map_pid_to_cmdline[tpid] = idx;
2429 savedcmd->cmdline_idx = idx;
2432 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2433 set_cmdline(idx, tsk->comm);
2435 arch_spin_unlock(&trace_cmdline_lock);
2440 static void __trace_find_cmdline(int pid, char comm[])
2446 strcpy(comm, "<idle>");
2450 if (WARN_ON_ONCE(pid < 0)) {
2451 strcpy(comm, "<XXX>");
2455 tpid = pid & (PID_MAX_DEFAULT - 1);
2456 map = savedcmd->map_pid_to_cmdline[tpid];
2457 if (map != NO_CMDLINE_MAP) {
2458 tpid = savedcmd->map_cmdline_to_pid[map];
2460 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2464 strcpy(comm, "<...>");
2467 void trace_find_cmdline(int pid, char comm[])
2470 arch_spin_lock(&trace_cmdline_lock);
2472 __trace_find_cmdline(pid, comm);
2474 arch_spin_unlock(&trace_cmdline_lock);
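/*
 * Illustrative sketch (not part of the original file): a typical caller
 * resolving a recorded pid back to a comm while rendering an event.
 * The example_* name is hypothetical.
 */
static inline void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	/* Yields "<idle>" for pid 0 and "<...>" if the pid was never recorded */
	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}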
2478 static int *trace_find_tgid_ptr(int pid)
2481 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2482 * if we observe a non-NULL tgid_map then we also observe the correct
2485 int *map = smp_load_acquire(&tgid_map);
2487 if (unlikely(!map || pid > tgid_map_max))
2493 int trace_find_tgid(int pid)
2495 int *ptr = trace_find_tgid_ptr(pid);
2497 return ptr ? *ptr : 0;
2500 static int trace_save_tgid(struct task_struct *tsk)
2504 /* treat recording of idle task as a success */
2508 ptr = trace_find_tgid_ptr(tsk->pid);
2516 static bool tracing_record_taskinfo_skip(int flags)
2518 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2520 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2522 if (!__this_cpu_read(trace_taskinfo_save))
2528 * tracing_record_taskinfo - record the task info of a task
2530 * @task: task to record
2531 * @flags: TRACE_RECORD_CMDLINE for recording comm
2532 * TRACE_RECORD_TGID for recording tgid
2534 void tracing_record_taskinfo(struct task_struct *task, int flags)
2538 if (tracing_record_taskinfo_skip(flags))
2542 * Record as much task information as possible. If some fail, continue
2543 * to try to record the others.
2545 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2546 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2548 /* If recording any information failed, retry again soon. */
2552 __this_cpu_write(trace_taskinfo_save, false);
2556 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2558 * @prev: previous task during sched_switch
2559 * @next: next task during sched_switch
2560 * @flags: TRACE_RECORD_CMDLINE for recording comm
2561 * TRACE_RECORD_TGID for recording tgid
2563 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2564 struct task_struct *next, int flags)
2568 if (tracing_record_taskinfo_skip(flags))
2572 * Record as much task information as possible. If some fail, continue
2573 * to try to record the others.
2575 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2576 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2577 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2578 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2580 /* If recording any information failed, retry again soon. */
2584 __this_cpu_write(trace_taskinfo_save, false);
2587 /* Helpers to record a specific task information */
2588 void tracing_record_cmdline(struct task_struct *task)
2590 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2593 void tracing_record_tgid(struct task_struct *task)
2595 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
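/*
 * Illustrative sketch (not part of the original file): the record/lookup
 * pairing for tgids. tracing_record_tgid() saves the tgid keyed by pid, and
 * trace_find_tgid() reads it back later (returning 0 when the tgid map has
 * not been allocated). The example_* name is hypothetical.
 */
static inline int example_record_and_find_tgid(struct task_struct *task)
{
	tracing_record_tgid(task);
	return trace_find_tgid(task->pid);
}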
2599 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2600 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2601 * simplifies those functions and keeps them in sync.
2603 enum print_line_t trace_handle_return(struct trace_seq *s)
2605 return trace_seq_has_overflowed(s) ?
2606 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2608 EXPORT_SYMBOL_GPL(trace_handle_return);
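/*
 * Illustrative sketch (not part of the original file): an output handler
 * typically ends with trace_handle_return(), so that a trace_seq overflow
 * is reported as TRACE_TYPE_PARTIAL_LINE. The example_* name is
 * hypothetical.
 */
static inline enum print_line_t example_output_line(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "example event on cpu %d\n", iter->cpu);
	return trace_handle_return(&iter->seq);
}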
2610 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2612 unsigned int trace_flags = irqs_status;
2615 pc = preempt_count();
2618 trace_flags |= TRACE_FLAG_NMI;
2619 if (pc & HARDIRQ_MASK)
2620 trace_flags |= TRACE_FLAG_HARDIRQ;
2621 if (in_serving_softirq())
2622 trace_flags |= TRACE_FLAG_SOFTIRQ;
2624 if (tif_need_resched())
2625 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2626 if (test_preempt_need_resched())
2627 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2628 return (trace_flags << 16) | (pc & 0xff);
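/*
 * Illustrative sketch (not part of the original file): the context word
 * built above packs the TRACE_FLAG_* bits into the upper 16 bits and the
 * preemption depth into the low byte; a consumer would split it back apart
 * roughly as below. The example_* name is hypothetical.
 */
static inline void example_unpack_trace_ctx(unsigned int trace_ctx,
					    unsigned int *preempt_count,
					    unsigned int *flags)
{
	*preempt_count = trace_ctx & 0xff;	/* low byte: preemption depth */
	*flags = trace_ctx >> 16;		/* upper bits: TRACE_FLAG_* */
}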
2631 struct ring_buffer_event *
2632 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2635 unsigned int trace_ctx)
2637 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2640 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2641 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2642 static int trace_buffered_event_ref;
2645 * trace_buffered_event_enable - enable buffering events
2647 * When events are being filtered, it is quicker to use a temporary
2648 * buffer to write the event data into if there's a likely chance
2649 * that it will not be committed. The discard of the ring buffer
2650 * is not as fast as committing, and is much slower than simply copying the data and committing it.
2653 * When an event is to be filtered, allocate per cpu buffers to
2654 * write the event data into, and if the event is filtered and discarded
2655 * it is simply dropped; otherwise, the entire data is committed and not discarded.
2658 void trace_buffered_event_enable(void)
2660 struct ring_buffer_event *event;
2664 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2666 if (trace_buffered_event_ref++)
2669 for_each_tracing_cpu(cpu) {
2670 page = alloc_pages_node(cpu_to_node(cpu),
2671 GFP_KERNEL | __GFP_NORETRY, 0);
2675 event = page_address(page);
2676 memset(event, 0, sizeof(*event));
2678 per_cpu(trace_buffered_event, cpu) = event;
2681 if (cpu == smp_processor_id() &&
2682 __this_cpu_read(trace_buffered_event) !=
2683 per_cpu(trace_buffered_event, cpu))
2690 trace_buffered_event_disable();
2693 static void enable_trace_buffered_event(void *data)
2695 /* Probably not needed, but do it anyway */
2697 this_cpu_dec(trace_buffered_event_cnt);
2700 static void disable_trace_buffered_event(void *data)
2702 this_cpu_inc(trace_buffered_event_cnt);
2706 * trace_buffered_event_disable - disable buffering events
2708 * When a filter is removed, it is faster to not use the buffered
2709 * events, and to commit directly into the ring buffer. Free up
2710 * the temp buffers when there are no more users. This requires
2711 * special synchronization with current events.
2713 void trace_buffered_event_disable(void)
2717 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2719 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2722 if (--trace_buffered_event_ref)
2726 /* For each CPU, set the buffer as used. */
2727 smp_call_function_many(tracing_buffer_mask,
2728 disable_trace_buffered_event, NULL, 1);
2731 /* Wait for all current users to finish */
2734 for_each_tracing_cpu(cpu) {
2735 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2736 per_cpu(trace_buffered_event, cpu) = NULL;
2739 * Make sure trace_buffered_event is NULL before clearing
2740 * trace_buffered_event_cnt.
2745 /* Do the work on each cpu */
2746 smp_call_function_many(tracing_buffer_mask,
2747 enable_trace_buffered_event, NULL, 1);
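/*
 * Illustrative sketch (not part of the original file): the enable/disable
 * pair is reference counted and, per the WARN_ON_ONCE() checks above, must
 * be called with event_mutex held, e.g. when a filter is attached to or
 * removed from an event. The example_* name is hypothetical.
 */
static inline void example_toggle_buffered_events(bool attaching_filter)
{
	mutex_lock(&event_mutex);
	if (attaching_filter)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}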
2751 static struct trace_buffer *temp_buffer;
2753 struct ring_buffer_event *
2754 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2755 struct trace_event_file *trace_file,
2756 int type, unsigned long len,
2757 unsigned int trace_ctx)
2759 struct ring_buffer_event *entry;
2760 struct trace_array *tr = trace_file->tr;
2763 *current_rb = tr->array_buffer.buffer;
2765 if (!tr->no_filter_buffering_ref &&
2766 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2767 (entry = this_cpu_read(trace_buffered_event))) {
2769 * Filtering is on, so try to use the per cpu buffer first.
2770 * This buffer will simulate a ring_buffer_event,
2771 * where the type_len is zero and the array[0] will
2772 * hold the full length.
3773 * (see include/linux/ring_buffer.h for details on
2774 * how the ring_buffer_event is structured).
2776 * Using a temp buffer during filtering and copying it
2777 * on a matched filter is quicker than writing directly
2778 * into the ring buffer and then discarding it when
2779 * it doesn't match. That is because the discard
2780 * requires several atomic operations to get right.
2781 * Copying on match and doing nothing on a failed match
2782 * is still quicker than no copy on match, but having
2783 * to discard out of the ring buffer on a failed match.
2785 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2787 val = this_cpu_inc_return(trace_buffered_event_cnt);
2790 * Preemption is disabled, but interrupts and NMIs
2791 * can still come in now. If that happens after
2792 * the above increment, then it will have to go
2793 * back to the old method of allocating the event
2794 * on the ring buffer, and if the filter fails, it
2795 * will have to call ring_buffer_discard_commit()
2798 * Need to also check the unlikely case that the
2799 * length is bigger than the temp buffer size.
2800 * If that happens, then the reserve is pretty much
2801 * guaranteed to fail, as the ring buffer currently
2802 * only allows events less than a page. But that may
2803 * change in the future, so let the ring buffer reserve
2804 * handle the failure in that case.
2806 if (val == 1 && likely(len <= max_len)) {
2807 trace_event_setup(entry, type, trace_ctx);
2808 entry->array[0] = len;
2811 this_cpu_dec(trace_buffered_event_cnt);
2814 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2817 * If tracing is off, but we have triggers enabled,
2818 * we still need to look at the event data. Use the temp_buffer
2819 * to store the trace event for the trigger to use. It's recursion
2820 * safe and will not be recorded anywhere.
2822 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2823 *current_rb = temp_buffer;
2824 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2829 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2831 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2832 static DEFINE_MUTEX(tracepoint_printk_mutex);
2834 static void output_printk(struct trace_event_buffer *fbuffer)
2836 struct trace_event_call *event_call;
2837 struct trace_event_file *file;
2838 struct trace_event *event;
2839 unsigned long flags;
2840 struct trace_iterator *iter = tracepoint_print_iter;
2842 /* We should never get here if iter is NULL */
2843 if (WARN_ON_ONCE(!iter))
2846 event_call = fbuffer->trace_file->event_call;
2847 if (!event_call || !event_call->event.funcs ||
2848 !event_call->event.funcs->trace)
2851 file = fbuffer->trace_file;
2852 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2853 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2854 !filter_match_preds(file->filter, fbuffer->entry)))
2857 event = &fbuffer->trace_file->event_call->event;
2859 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2860 trace_seq_init(&iter->seq);
2861 iter->ent = fbuffer->entry;
2862 event_call->event.funcs->trace(iter, 0, event);
2863 trace_seq_putc(&iter->seq, 0);
2864 printk("%s", iter->seq.buffer);
2866 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2869 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2870 void *buffer, size_t *lenp,
2873 int save_tracepoint_printk;
2876 mutex_lock(&tracepoint_printk_mutex);
2877 save_tracepoint_printk = tracepoint_printk;
2879 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2882 * This will force exiting early, as tracepoint_printk
2883 * is always zero when tracepoint_print_iter is not allocated
2885 if (!tracepoint_print_iter)
2886 tracepoint_printk = 0;
2888 if (save_tracepoint_printk == tracepoint_printk)
2891 if (tracepoint_printk)
2892 static_key_enable(&tracepoint_printk_key.key);
2894 static_key_disable(&tracepoint_printk_key.key);
2897 mutex_unlock(&tracepoint_printk_mutex);
2902 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2904 if (static_key_false(&tracepoint_printk_key.key))
2905 output_printk(fbuffer);
2907 if (static_branch_unlikely(&trace_event_exports_enabled))
2908 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2909 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2910 fbuffer->event, fbuffer->entry,
2911 fbuffer->trace_ctx, fbuffer->regs);
2913 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2918 * trace_buffer_unlock_commit_regs()
2919 * trace_event_buffer_commit()
2920 * trace_event_raw_event_xxx()
2922 # define STACK_SKIP 3
2924 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2925 struct trace_buffer *buffer,
2926 struct ring_buffer_event *event,
2927 unsigned int trace_ctx,
2928 struct pt_regs *regs)
2930 __buffer_unlock_commit(buffer, event);
2933 * If regs is not set, then skip the necessary functions.
2934 * Note, we can still get here via blktrace, wakeup tracer
2935 * and mmiotrace, but that's ok if they lose a function or
2936 * two. They are not that meaningful.
2938 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2939 ftrace_trace_userstack(tr, buffer, trace_ctx);
2943 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2946 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2947 struct ring_buffer_event *event)
2949 __buffer_unlock_commit(buffer, event);
2953 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2954 parent_ip, unsigned int trace_ctx)
2956 struct trace_event_call *call = &event_function;
2957 struct trace_buffer *buffer = tr->array_buffer.buffer;
2958 struct ring_buffer_event *event;
2959 struct ftrace_entry *entry;
2961 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2965 entry = ring_buffer_event_data(event);
2967 entry->parent_ip = parent_ip;
2969 if (!call_filter_check_discard(call, entry, buffer, event)) {
2970 if (static_branch_unlikely(&trace_function_exports_enabled))
2971 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2972 __buffer_unlock_commit(buffer, event);
2976 #ifdef CONFIG_STACKTRACE
2978 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2979 #define FTRACE_KSTACK_NESTING 4
2981 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2983 struct ftrace_stack {
2984 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2988 struct ftrace_stacks {
2989 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2992 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2993 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2995 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2996 unsigned int trace_ctx,
2997 int skip, struct pt_regs *regs)
2999 struct trace_event_call *call = &event_kernel_stack;
3000 struct ring_buffer_event *event;
3001 unsigned int size, nr_entries;
3002 struct ftrace_stack *fstack;
3003 struct stack_entry *entry;
3007 * Add one, for this function and the call to save_stack_trace()
3008 * If regs is set, then these functions will not be in the way.
3010 #ifndef CONFIG_UNWINDER_ORC
3015 preempt_disable_notrace();
3017 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3019 /* This should never happen. If it does, yell once and skip */
3020 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3024 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3025 * interrupt will either see the value pre increment or post
3026 * increment. If the interrupt happens pre increment it will have
3027 * restored the counter when it returns. We just need a barrier to
3028 * keep gcc from moving things around.
3032 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3033 size = ARRAY_SIZE(fstack->calls);
3036 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3039 nr_entries = stack_trace_save(fstack->calls, size, skip);
3042 size = nr_entries * sizeof(unsigned long);
3043 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3044 (sizeof(*entry) - sizeof(entry->caller)) + size,
3048 entry = ring_buffer_event_data(event);
3050 memcpy(&entry->caller, fstack->calls, size);
3051 entry->size = nr_entries;
3053 if (!call_filter_check_discard(call, entry, buffer, event))
3054 __buffer_unlock_commit(buffer, event);
3057 /* Again, don't let gcc optimize things here */
3059 __this_cpu_dec(ftrace_stack_reserve);
3060 preempt_enable_notrace();
3064 static inline void ftrace_trace_stack(struct trace_array *tr,
3065 struct trace_buffer *buffer,
3066 unsigned int trace_ctx,
3067 int skip, struct pt_regs *regs)
3069 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3072 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3075 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3078 struct trace_buffer *buffer = tr->array_buffer.buffer;
3080 if (rcu_is_watching()) {
3081 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3086 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3087 * but if the above rcu_is_watching() failed, then the NMI
3088 * triggered someplace critical, and rcu_irq_enter() should
3089 * not be called from NMI.
3091 if (unlikely(in_nmi()))
3094 rcu_irq_enter_irqson();
3095 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3096 rcu_irq_exit_irqson();
3100 * trace_dump_stack - record a stack back trace in the trace buffer
3101 * @skip: Number of functions to skip (helper handlers)
3103 void trace_dump_stack(int skip)
3105 if (tracing_disabled || tracing_selftest_running)
3108 #ifndef CONFIG_UNWINDER_ORC
3109 /* Skip 1 to skip this function. */
3112 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3113 tracing_gen_ctx(), skip, NULL);
3115 EXPORT_SYMBOL_GPL(trace_dump_stack);
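/*
 * Illustrative sketch (not part of the original file): recording the
 * current kernel stack into the global trace buffer from an arbitrary
 * spot, e.g. while chasing an unexpected code path. The example_* name is
 * hypothetical.
 */
static inline void example_mark_unexpected_path(void)
{
	trace_dump_stack(0);	/* 0: do not skip any additional caller frames */
}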
3117 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3118 static DEFINE_PER_CPU(int, user_stack_count);
3121 ftrace_trace_userstack(struct trace_array *tr,
3122 struct trace_buffer *buffer, unsigned int trace_ctx)
3124 struct trace_event_call *call = &event_user_stack;
3125 struct ring_buffer_event *event;
3126 struct userstack_entry *entry;
3128 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3132 * NMIs cannot handle page faults, even with fixups.
3133 * The save user stack can (and often does) fault.
3135 if (unlikely(in_nmi()))
3139 * prevent recursion, since the user stack tracing may
3140 * trigger other kernel events.
3143 if (__this_cpu_read(user_stack_count))
3146 __this_cpu_inc(user_stack_count);
3148 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3149 sizeof(*entry), trace_ctx);
3151 goto out_drop_count;
3152 entry = ring_buffer_event_data(event);
3154 entry->tgid = current->tgid;
3155 memset(&entry->caller, 0, sizeof(entry->caller));
3157 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3158 if (!call_filter_check_discard(call, entry, buffer, event))
3159 __buffer_unlock_commit(buffer, event);
3162 __this_cpu_dec(user_stack_count);
3166 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3167 static void ftrace_trace_userstack(struct trace_array *tr,
3168 struct trace_buffer *buffer,
3169 unsigned int trace_ctx)
3172 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3174 #endif /* CONFIG_STACKTRACE */
3177 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3178 unsigned long long delta)
3180 entry->bottom_delta_ts = delta & U32_MAX;
3181 entry->top_delta_ts = (delta >> 32);
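/*
 * Illustrative sketch (not part of the original file): the 64-bit delta is
 * split into two 32-bit halves above; a reader reassembles it as below.
 * The example_* name is hypothetical.
 */
static inline u64 example_func_repeats_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}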
3184 void trace_last_func_repeats(struct trace_array *tr,
3185 struct trace_func_repeats *last_info,
3186 unsigned int trace_ctx)
3188 struct trace_buffer *buffer = tr->array_buffer.buffer;
3189 struct func_repeats_entry *entry;
3190 struct ring_buffer_event *event;
3193 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3194 sizeof(*entry), trace_ctx);
3198 delta = ring_buffer_event_time_stamp(buffer, event) -
3199 last_info->ts_last_call;
3201 entry = ring_buffer_event_data(event);
3202 entry->ip = last_info->ip;
3203 entry->parent_ip = last_info->parent_ip;
3204 entry->count = last_info->count;
3205 func_repeats_set_delta_ts(entry, delta);
3207 __buffer_unlock_commit(buffer, event);
3210 /* created for use with alloc_percpu */
3211 struct trace_buffer_struct {
3213 char buffer[4][TRACE_BUF_SIZE];
3216 static struct trace_buffer_struct *trace_percpu_buffer;
3219 * This allows for lockless recording. If we're nested too deeply, then
3220 * this returns NULL.
3222 static char *get_trace_buf(void)
3224 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3226 if (!buffer || buffer->nesting >= 4)
3231 /* Interrupts must see nesting incremented before we use the buffer */
3233 return &buffer->buffer[buffer->nesting - 1][0];
3236 static void put_trace_buf(void)
3238 /* Don't let the decrement of nesting leak before this */
3240 this_cpu_dec(trace_percpu_buffer->nesting);
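/*
 * Illustrative sketch (not part of the original file): get_trace_buf() and
 * put_trace_buf() are used as a pair, with preemption disabled, since the
 * buffer is per-CPU and indexed by the nesting level. The example_* name
 * is hypothetical.
 */
static inline void example_use_trace_buf(const char *msg)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		strscpy(tbuffer, msg, TRACE_BUF_SIZE);
		/* ... hand the formatted message off ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}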
3243 static int alloc_percpu_trace_buffer(void)
3245 struct trace_buffer_struct *buffers;
3247 if (trace_percpu_buffer)
3250 buffers = alloc_percpu(struct trace_buffer_struct);
3251 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3254 trace_percpu_buffer = buffers;
3258 static int buffers_allocated;
3260 void trace_printk_init_buffers(void)
3262 if (buffers_allocated)
3265 if (alloc_percpu_trace_buffer())
3268 /* trace_printk() is for debug use only. Don't use it in production. */
3271 pr_warn("**********************************************************\n");
3272 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3274 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3276 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3277 pr_warn("** unsafe for production use. **\n");
3279 pr_warn("** If you see this message and you are not debugging **\n");
3280 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3282 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3283 pr_warn("**********************************************************\n");
3285 /* Expand the buffers to set size */
3286 tracing_update_buffers();
3288 buffers_allocated = 1;
3291 * trace_printk_init_buffers() can be called by modules.
3292 * If that happens, then we need to start cmdline recording
3293 * directly here. If the global_trace.buffer is already
3294 * allocated here, then this was called by module code.
3296 if (global_trace.array_buffer.buffer)
3297 tracing_start_cmdline_record();
3299 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3301 void trace_printk_start_comm(void)
3303 /* Start tracing comms if trace printk is set */
3304 if (!buffers_allocated)
3306 tracing_start_cmdline_record();
3309 static void trace_printk_start_stop_comm(int enabled)
3311 if (!buffers_allocated)
3315 tracing_start_cmdline_record();
3317 tracing_stop_cmdline_record();
3321 * trace_vbprintk - write binary msg to tracing buffer
3322 * @ip: The address of the caller
3323 * @fmt: The string format to write to the buffer
3324 * @args: Arguments for @fmt
3326 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3328 struct trace_event_call *call = &event_bprint;
3329 struct ring_buffer_event *event;
3330 struct trace_buffer *buffer;
3331 struct trace_array *tr = &global_trace;
3332 struct bprint_entry *entry;
3333 unsigned int trace_ctx;
3337 if (unlikely(tracing_selftest_running || tracing_disabled))
3340 /* Don't pollute graph traces with trace_vprintk internals */
3341 pause_graph_tracing();
3343 trace_ctx = tracing_gen_ctx();
3344 preempt_disable_notrace();
3346 tbuffer = get_trace_buf();
3352 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3354 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3357 size = sizeof(*entry) + sizeof(u32) * len;
3358 buffer = tr->array_buffer.buffer;
3359 ring_buffer_nest_start(buffer);
3360 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3364 entry = ring_buffer_event_data(event);
3368 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3369 if (!call_filter_check_discard(call, entry, buffer, event)) {
3370 __buffer_unlock_commit(buffer, event);
3371 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3375 ring_buffer_nest_end(buffer);
3380 preempt_enable_notrace();
3381 unpause_graph_tracing();
3385 EXPORT_SYMBOL_GPL(trace_vbprintk);
3389 __trace_array_vprintk(struct trace_buffer *buffer,
3390 unsigned long ip, const char *fmt, va_list args)
3392 struct trace_event_call *call = &event_print;
3393 struct ring_buffer_event *event;
3395 struct print_entry *entry;
3396 unsigned int trace_ctx;
3399 if (tracing_disabled || tracing_selftest_running)
3402 /* Don't pollute graph traces with trace_vprintk internals */
3403 pause_graph_tracing();
3405 trace_ctx = tracing_gen_ctx();
3406 preempt_disable_notrace();
3409 tbuffer = get_trace_buf();
3415 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3417 size = sizeof(*entry) + len + 1;
3418 ring_buffer_nest_start(buffer);
3419 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3423 entry = ring_buffer_event_data(event);
3426 memcpy(&entry->buf, tbuffer, len + 1);
3427 if (!call_filter_check_discard(call, entry, buffer, event)) {
3428 __buffer_unlock_commit(buffer, event);
3429 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3433 ring_buffer_nest_end(buffer);
3437 preempt_enable_notrace();
3438 unpause_graph_tracing();
3444 int trace_array_vprintk(struct trace_array *tr,
3445 unsigned long ip, const char *fmt, va_list args)
3447 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3451 * trace_array_printk - Print a message to a specific instance
3452 * @tr: The instance trace_array descriptor
3453 * @ip: The instruction pointer that this is called from.
3454 * @fmt: The format to print (printf format)
3456 * If a subsystem sets up its own instance, they have the right to
3457 * printk strings into their tracing instance buffer using this
3458 * function. Note, this function will not write into the top level
3459 * buffer (use trace_printk() for that), as writing into the top level
3460 * buffer should only have events that can be individually disabled.
3461 * trace_printk() is only for debugging a kernel, and should never
3462 * be incorporated into normal use.
3464 * trace_array_printk() can be used, as it will not add noise to the
3465 * top level tracing buffer.
3467 * Note, trace_array_init_printk() must be called on @tr before this
3471 int trace_array_printk(struct trace_array *tr,
3472 unsigned long ip, const char *fmt, ...)
3480 /* This is only allowed for created instances */
3481 if (tr == &global_trace)
3484 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3488 ret = trace_array_vprintk(tr, ip, fmt, ap);
3492 EXPORT_SYMBOL_GPL(trace_array_printk);
3495 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3496 * @tr: The trace array to initialize the buffers for
3498 * As trace_array_printk() only writes into instances, they are OK to
3499 * have in the kernel (unlike trace_printk()). This needs to be called
3500 * before trace_array_printk() can be used on a trace_array.
3502 int trace_array_init_printk(struct trace_array *tr)
3507 /* This is only allowed for created instances */
3508 if (tr == &global_trace)
3511 return alloc_percpu_trace_buffer();
3513 EXPORT_SYMBOL_GPL(trace_array_init_printk);
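/*
 * Illustrative sketch (not part of the original file): a subsystem printing
 * into its own instance, per the kernel-doc above. The instance name
 * "example" and the example_* function name are hypothetical;
 * trace_array_get_by_name() is assumed to provide the instance.
 */
static inline void example_instance_printk(int value)
{
	struct trace_array *tr = trace_array_get_by_name("example");

	if (!tr)
		return;

	if (!trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "value=%d\n", value);

	trace_array_put(tr);
}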
3516 int trace_array_printk_buf(struct trace_buffer *buffer,
3517 unsigned long ip, const char *fmt, ...)
3522 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3526 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3532 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3534 return trace_array_vprintk(&global_trace, ip, fmt, args);
3536 EXPORT_SYMBOL_GPL(trace_vprintk);
3538 static void trace_iterator_increment(struct trace_iterator *iter)
3540 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3544 ring_buffer_iter_advance(buf_iter);
3547 static struct trace_entry *
3548 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3549 unsigned long *lost_events)
3551 struct ring_buffer_event *event;
3552 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3555 event = ring_buffer_iter_peek(buf_iter, ts);
3557 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3558 (unsigned long)-1 : 0;
3560 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3565 iter->ent_size = ring_buffer_event_length(event);
3566 return ring_buffer_event_data(event);
3572 static struct trace_entry *
3573 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3574 unsigned long *missing_events, u64 *ent_ts)
3576 struct trace_buffer *buffer = iter->array_buffer->buffer;
3577 struct trace_entry *ent, *next = NULL;
3578 unsigned long lost_events = 0, next_lost = 0;
3579 int cpu_file = iter->cpu_file;
3580 u64 next_ts = 0, ts;
3586 * If we are in a per_cpu trace file, don't bother iterating over
3587 * all CPUs; peek directly at that one CPU.
3589 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3590 if (ring_buffer_empty_cpu(buffer, cpu_file))
3592 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3594 *ent_cpu = cpu_file;
3599 for_each_tracing_cpu(cpu) {
3601 if (ring_buffer_empty_cpu(buffer, cpu))
3604 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3607 * Pick the entry with the smallest timestamp:
3609 if (ent && (!next || ts < next_ts)) {
3613 next_lost = lost_events;
3614 next_size = iter->ent_size;
3618 iter->ent_size = next_size;
3621 *ent_cpu = next_cpu;
3627 *missing_events = next_lost;
3632 #define STATIC_FMT_BUF_SIZE 128
3633 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3635 static char *trace_iter_expand_format(struct trace_iterator *iter)
3640 * iter->tr is NULL when used with tp_printk, which makes
3641 * this get called where it is not safe to call krealloc().
3643 if (!iter->tr || iter->fmt == static_fmt_buf)
3646 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3649 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3656 /* Returns true if the string is safe to dereference from an event */
3657 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3659 unsigned long addr = (unsigned long)str;
3660 struct trace_event *trace_event;
3661 struct trace_event_call *event;
3663 /* OK if part of the event data */
3664 if ((addr >= (unsigned long)iter->ent) &&
3665 (addr < (unsigned long)iter->ent + iter->ent_size))
3668 /* OK if part of the temp seq buffer */
3669 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3670 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3673 /* Core rodata can not be freed */
3674 if (is_kernel_rodata(addr))
3677 if (trace_is_tracepoint_string(str))
3681 * Now this could be a module event, referencing core module
3682 * data, which is OK.
3687 trace_event = ftrace_find_event(iter->ent->type);
3691 event = container_of(trace_event, struct trace_event_call, event);
3695 /* Would rather have rodata, but this will suffice */
3696 if (within_module_core(addr, event->mod))
3702 static const char *show_buffer(struct trace_seq *s)
3704 struct seq_buf *seq = &s->seq;
3706 seq_buf_terminate(seq);
3711 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3713 static int test_can_verify_check(const char *fmt, ...)
3720 * The verifier depends on vsnprintf() modifying the va_list
3721 * passed to it, where it is sent as a reference. Some architectures
3722 * (like x86_32) pass it by value, which means that vsnprintf()
3723 * does not modify the va_list passed to it, and the verifier
3724 * would then need to be able to understand all the values that
3725 * vsnprintf can use. If it is passed by value, the verifier is disabled.
3729 vsnprintf(buf, 16, "%d", ap);
3730 ret = va_arg(ap, int);
3736 static void test_can_verify(void)
3738 if (!test_can_verify_check("%d %d", 0, 1)) {
3739 pr_info("trace event string verifier disabled\n");
3740 static_branch_inc(&trace_no_verify);
3745 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3746 * @iter: The iterator that holds the seq buffer and the event being printed
3747 * @fmt: The format used to print the event
3748 * @ap: The va_list holding the data to print from @fmt.
3750 * This writes the data into the @iter->seq buffer using the data from
3751 * @fmt and @ap. If the format has a %s, then the source of the string
3752 * is examined to make sure it is safe to print, otherwise it will
3753 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string as well as the format.
3756 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3759 const char *p = fmt;
3763 if (WARN_ON_ONCE(!fmt))
3766 if (static_branch_unlikely(&trace_no_verify))
3769 /* Don't bother checking when doing a ftrace_dump() */
3770 if (iter->fmt == static_fmt_buf)
3779 /* We only care about %s and variants */
3780 for (i = 0; p[i]; i++) {
3781 if (i + 1 >= iter->fmt_size) {
3783 * If we can't expand the copy buffer, just print the format as is.
3786 if (!trace_iter_expand_format(iter))
3790 if (p[i] == '\\' && p[i+1]) {
3795 /* Need to test cases like %08.*s */
3796 for (j = 1; p[i+j]; j++) {
3797 if (isdigit(p[i+j]) ||
3800 if (p[i+j] == '*') {
3812 /* If no %s found then just print normally */
3816 /* Copy up to the %s, and print that */
3817 strncpy(iter->fmt, p, i);
3818 iter->fmt[i] = '\0';
3819 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3822 len = va_arg(ap, int);
3824 /* The ap now points to the string data of the %s */
3825 str = va_arg(ap, const char *);
3828 * If you hit this warning, it is likely that the
3829 * trace event in question used %s on a string that
3830 * was saved at the time of the event, but may not be
3831 * around when the trace is read. Use __string(),
3832 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3833 * instead. See samples/trace_events/trace-events-sample.h
3836 if (WARN_ONCE(!trace_safe_str(iter, str),
3837 "fmt: '%s' current_buffer: '%s'",
3838 fmt, show_buffer(&iter->seq))) {
3841 /* Try to safely read the string */
3843 if (len + 1 > iter->fmt_size)
3844 len = iter->fmt_size - 1;
3847 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3851 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3855 trace_seq_printf(&iter->seq, "(0x%px)", str);
3857 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3859 str = "[UNSAFE-MEMORY]";
3860 strcpy(iter->fmt, "%s");
3862 strncpy(iter->fmt, p + i, j + 1);
3863 iter->fmt[j+1] = '\0';
3866 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3868 trace_seq_printf(&iter->seq, iter->fmt, str);
3874 trace_seq_vprintf(&iter->seq, p, ap);
3877 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3879 const char *p, *new_fmt;
3882 if (WARN_ON_ONCE(!fmt))
3885 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3889 new_fmt = q = iter->fmt;
3891 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3892 if (!trace_iter_expand_format(iter))
3895 q += iter->fmt - new_fmt;
3896 new_fmt = iter->fmt;
3901 /* Replace %p with %px */
3905 } else if (p[0] == 'p' && !isalnum(p[1])) {
3916 #define STATIC_TEMP_BUF_SIZE 128
3917 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3919 /* Find the next real entry, without updating the iterator itself */
3920 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3921 int *ent_cpu, u64 *ent_ts)
3923 /* __find_next_entry will reset ent_size */
3924 int ent_size = iter->ent_size;
3925 struct trace_entry *entry;
3928 * If called from ftrace_dump(), then the iter->temp buffer
3929 * will be the static_temp_buf and not created from kmalloc.
3930 * If the entry size is greater than the buffer, we can
3931 * not save it. Just return NULL in that case. This is only
3932 * used to add markers when two consecutive events' time
3933 * stamps have a large delta. See trace_print_lat_context()
3935 if (iter->temp == static_temp_buf &&
3936 STATIC_TEMP_BUF_SIZE < ent_size)
3940 * The __find_next_entry() may call peek_next_entry(), which may
3941 * call ring_buffer_peek() that may make the contents of iter->ent
3942 * undefined. Need to copy iter->ent now.
3944 if (iter->ent && iter->ent != iter->temp) {
3945 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3946 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3948 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3953 iter->temp_size = iter->ent_size;
3955 memcpy(iter->temp, iter->ent, iter->ent_size);
3956 iter->ent = iter->temp;
3958 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3959 /* Put back the original ent_size */
3960 iter->ent_size = ent_size;
3965 /* Find the next real entry, and increment the iterator to the next entry */
3966 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3968 iter->ent = __find_next_entry(iter, &iter->cpu,
3969 &iter->lost_events, &iter->ts);
3972 trace_iterator_increment(iter);
3974 return iter->ent ? iter : NULL;
3977 static void trace_consume(struct trace_iterator *iter)
3979 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3980 &iter->lost_events);
3983 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3985 struct trace_iterator *iter = m->private;
3989 WARN_ON_ONCE(iter->leftover);
3993 /* can't go backwards */
3998 ent = trace_find_next_entry_inc(iter);
4002 while (ent && iter->idx < i)
4003 ent = trace_find_next_entry_inc(iter);
4010 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4012 struct ring_buffer_iter *buf_iter;
4013 unsigned long entries = 0;
4016 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4018 buf_iter = trace_buffer_iter(iter, cpu);
4022 ring_buffer_iter_reset(buf_iter);
4025 * We could have the case with the max latency tracers
4026 * that a reset never took place on a cpu. This is evident
4027 * by the timestamp being before the start of the buffer.
4029 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4030 if (ts >= iter->array_buffer->time_start)
4033 ring_buffer_iter_advance(buf_iter);
4036 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4040 * The current tracer is copied to avoid taking a global lock all around.
4043 static void *s_start(struct seq_file *m, loff_t *pos)
4045 struct trace_iterator *iter = m->private;
4046 struct trace_array *tr = iter->tr;
4047 int cpu_file = iter->cpu_file;
4053 * copy the tracer to avoid using a global lock all around.
4054 * iter->trace is a copy of current_trace, the pointer to the
4055 * name may be used instead of a strcmp(), as iter->trace->name
4056 * will point to the same string as current_trace->name.
4058 mutex_lock(&trace_types_lock);
4059 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4060 *iter->trace = *tr->current_trace;
4061 mutex_unlock(&trace_types_lock);
4063 #ifdef CONFIG_TRACER_MAX_TRACE
4064 if (iter->snapshot && iter->trace->use_max_tr)
4065 return ERR_PTR(-EBUSY);
4068 if (!iter->snapshot)
4069 atomic_inc(&trace_record_taskinfo_disabled);
4071 if (*pos != iter->pos) {
4076 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4077 for_each_tracing_cpu(cpu)
4078 tracing_iter_reset(iter, cpu);
4080 tracing_iter_reset(iter, cpu_file);
4083 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4088 * If we overflowed the seq_file before, then we want
4089 * to just reuse the trace_seq buffer again.
4095 p = s_next(m, p, &l);
4099 trace_event_read_lock();
4100 trace_access_lock(cpu_file);
4104 static void s_stop(struct seq_file *m, void *p)
4106 struct trace_iterator *iter = m->private;
4108 #ifdef CONFIG_TRACER_MAX_TRACE
4109 if (iter->snapshot && iter->trace->use_max_tr)
4113 if (!iter->snapshot)
4114 atomic_dec(&trace_record_taskinfo_disabled);
4116 trace_access_unlock(iter->cpu_file);
4117 trace_event_read_unlock();
4121 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4122 unsigned long *entries, int cpu)
4124 unsigned long count;
4126 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4128 * If this buffer has skipped entries, then we hold all
4129 * entries for the trace and we need to ignore the
4130 * ones before the time stamp.
4132 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4133 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4134 /* total is the same as the entries */
4138 ring_buffer_overrun_cpu(buf->buffer, cpu);
4143 get_total_entries(struct array_buffer *buf,
4144 unsigned long *total, unsigned long *entries)
4152 for_each_tracing_cpu(cpu) {
4153 get_total_entries_cpu(buf, &t, &e, cpu);
4159 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4161 unsigned long total, entries;
4166 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4171 unsigned long trace_total_entries(struct trace_array *tr)
4173 unsigned long total, entries;
4178 get_total_entries(&tr->array_buffer, &total, &entries);
4183 static void print_lat_help_header(struct seq_file *m)
4185 seq_puts(m, "# _------=> CPU# \n"
4186 "# / _-----=> irqs-off \n"
4187 "# | / _----=> need-resched \n"
4188 "# || / _---=> hardirq/softirq \n"
4189 "# ||| / _--=> preempt-depth \n"
4191 "# cmd pid ||||| time | caller \n"
4192 "# \\ / ||||| \\ | / \n");
4195 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4197 unsigned long total;
4198 unsigned long entries;
4200 get_total_entries(buf, &total, &entries);
4201 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4202 entries, total, num_online_cpus());
4206 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4209 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4211 print_event_info(buf, m);
4213 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4214 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4217 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4220 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4221 const char *space = " ";
4222 int prec = tgid ? 12 : 2;
4224 print_event_info(buf, m);
4226 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4227 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4228 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4229 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4230 seq_printf(m, "# %.*s||| / delay\n", prec, space);
4231 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4232 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
4236 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4238 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4239 struct array_buffer *buf = iter->array_buffer;
4240 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4241 struct tracer *type = iter->trace;
4242 unsigned long entries;
4243 unsigned long total;
4244 const char *name = "preemption";
4248 get_total_entries(buf, &total, &entries);
4250 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4252 seq_puts(m, "# -----------------------------------"
4253 "---------------------------------\n");
4254 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4255 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4256 nsecs_to_usecs(data->saved_latency),
4260 #if defined(CONFIG_PREEMPT_NONE)
4262 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4264 #elif defined(CONFIG_PREEMPT)
4266 #elif defined(CONFIG_PREEMPT_RT)
4271 /* These are reserved for later use */
4274 seq_printf(m, " #P:%d)\n", num_online_cpus());
4278 seq_puts(m, "# -----------------\n");
4279 seq_printf(m, "# | task: %.16s-%d "
4280 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4281 data->comm, data->pid,
4282 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4283 data->policy, data->rt_priority);
4284 seq_puts(m, "# -----------------\n");
4286 if (data->critical_start) {
4287 seq_puts(m, "# => started at: ");
4288 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4289 trace_print_seq(m, &iter->seq);
4290 seq_puts(m, "\n# => ended at: ");
4291 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4292 trace_print_seq(m, &iter->seq);
4293 seq_puts(m, "\n#\n");
4299 static void test_cpu_buff_start(struct trace_iterator *iter)
4301 struct trace_seq *s = &iter->seq;
4302 struct trace_array *tr = iter->tr;
4304 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4307 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4310 if (cpumask_available(iter->started) &&
4311 cpumask_test_cpu(iter->cpu, iter->started))
4314 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4317 if (cpumask_available(iter->started))
4318 cpumask_set_cpu(iter->cpu, iter->started);
4320 /* Don't print started cpu buffer for the first entry of the trace */
4322 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4326 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4328 struct trace_array *tr = iter->tr;
4329 struct trace_seq *s = &iter->seq;
4330 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4331 struct trace_entry *entry;
4332 struct trace_event *event;
4336 test_cpu_buff_start(iter);
4338 event = ftrace_find_event(entry->type);
4340 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4341 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4342 trace_print_lat_context(iter);
4344 trace_print_context(iter);
4347 if (trace_seq_has_overflowed(s))
4348 return TRACE_TYPE_PARTIAL_LINE;
4351 return event->funcs->trace(iter, sym_flags, event);
4353 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4355 return trace_handle_return(s);
4358 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4360 struct trace_array *tr = iter->tr;
4361 struct trace_seq *s = &iter->seq;
4362 struct trace_entry *entry;
4363 struct trace_event *event;
4367 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4368 trace_seq_printf(s, "%d %d %llu ",
4369 entry->pid, iter->cpu, iter->ts);
4371 if (trace_seq_has_overflowed(s))
4372 return TRACE_TYPE_PARTIAL_LINE;
4374 event = ftrace_find_event(entry->type);
4376 return event->funcs->raw(iter, 0, event);
4378 trace_seq_printf(s, "%d ?\n", entry->type);
4380 return trace_handle_return(s);
4383 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4385 struct trace_array *tr = iter->tr;
4386 struct trace_seq *s = &iter->seq;
4387 unsigned char newline = '\n';
4388 struct trace_entry *entry;
4389 struct trace_event *event;
4393 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4394 SEQ_PUT_HEX_FIELD(s, entry->pid);
4395 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4396 SEQ_PUT_HEX_FIELD(s, iter->ts);
4397 if (trace_seq_has_overflowed(s))
4398 return TRACE_TYPE_PARTIAL_LINE;
4401 event = ftrace_find_event(entry->type);
4403 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4404 if (ret != TRACE_TYPE_HANDLED)
4408 SEQ_PUT_FIELD(s, newline);
4410 return trace_handle_return(s);
4413 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4415 struct trace_array *tr = iter->tr;
4416 struct trace_seq *s = &iter->seq;
4417 struct trace_entry *entry;
4418 struct trace_event *event;
4422 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4423 SEQ_PUT_FIELD(s, entry->pid);
4424 SEQ_PUT_FIELD(s, iter->cpu);
4425 SEQ_PUT_FIELD(s, iter->ts);
4426 if (trace_seq_has_overflowed(s))
4427 return TRACE_TYPE_PARTIAL_LINE;
4430 event = ftrace_find_event(entry->type);
4431 return event ? event->funcs->binary(iter, 0, event) :
4435 int trace_empty(struct trace_iterator *iter)
4437 struct ring_buffer_iter *buf_iter;
4440 /* If we are looking at one CPU buffer, only check that one */
4441 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4442 cpu = iter->cpu_file;
4443 buf_iter = trace_buffer_iter(iter, cpu);
4445 if (!ring_buffer_iter_empty(buf_iter))
4448 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4454 for_each_tracing_cpu(cpu) {
4455 buf_iter = trace_buffer_iter(iter, cpu);
4457 if (!ring_buffer_iter_empty(buf_iter))
4460 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4468 /* Called with trace_event_read_lock() held. */
4469 enum print_line_t print_trace_line(struct trace_iterator *iter)
4471 struct trace_array *tr = iter->tr;
4472 unsigned long trace_flags = tr->trace_flags;
4473 enum print_line_t ret;
4475 if (iter->lost_events) {
4476 if (iter->lost_events == (unsigned long)-1)
4477 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4480 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4481 iter->cpu, iter->lost_events);
4482 if (trace_seq_has_overflowed(&iter->seq))
4483 return TRACE_TYPE_PARTIAL_LINE;
4486 if (iter->trace && iter->trace->print_line) {
4487 ret = iter->trace->print_line(iter);
4488 if (ret != TRACE_TYPE_UNHANDLED)
4492 if (iter->ent->type == TRACE_BPUTS &&
4493 trace_flags & TRACE_ITER_PRINTK &&
4494 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4495 return trace_print_bputs_msg_only(iter);
4497 if (iter->ent->type == TRACE_BPRINT &&
4498 trace_flags & TRACE_ITER_PRINTK &&
4499 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4500 return trace_print_bprintk_msg_only(iter);
4502 if (iter->ent->type == TRACE_PRINT &&
4503 trace_flags & TRACE_ITER_PRINTK &&
4504 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4505 return trace_print_printk_msg_only(iter);
4507 if (trace_flags & TRACE_ITER_BIN)
4508 return print_bin_fmt(iter);
4510 if (trace_flags & TRACE_ITER_HEX)
4511 return print_hex_fmt(iter);
4513 if (trace_flags & TRACE_ITER_RAW)
4514 return print_raw_fmt(iter);
4516 return print_trace_fmt(iter);
4519 void trace_latency_header(struct seq_file *m)
4521 struct trace_iterator *iter = m->private;
4522 struct trace_array *tr = iter->tr;
4524 /* print nothing if the buffers are empty */
4525 if (trace_empty(iter))
4528 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4529 print_trace_header(m, iter);
4531 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4532 print_lat_help_header(m);
4535 void trace_default_header(struct seq_file *m)
4537 struct trace_iterator *iter = m->private;
4538 struct trace_array *tr = iter->tr;
4539 unsigned long trace_flags = tr->trace_flags;
4541 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4544 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4545 /* print nothing if the buffers are empty */
4546 if (trace_empty(iter))
4548 print_trace_header(m, iter);
4549 if (!(trace_flags & TRACE_ITER_VERBOSE))
4550 print_lat_help_header(m);
4552 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4553 if (trace_flags & TRACE_ITER_IRQ_INFO)
4554 print_func_help_header_irq(iter->array_buffer,
4557 print_func_help_header(iter->array_buffer, m,
4563 static void test_ftrace_alive(struct seq_file *m)
4565 if (!ftrace_is_dead())
4567 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4568 "# MAY BE MISSING FUNCTION EVENTS\n");
4571 #ifdef CONFIG_TRACER_MAX_TRACE
4572 static void show_snapshot_main_help(struct seq_file *m)
4574 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4575 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4576 "# Takes a snapshot of the main buffer.\n"
4577 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4578 "# (Doesn't have to be '2' works with any number that\n"
4579 "# is not a '0' or '1')\n");
4582 static void show_snapshot_percpu_help(struct seq_file *m)
4584 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4585 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4586 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4587 "# Takes a snapshot of the main buffer for this cpu.\n");
4589 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4590 "# Must use main snapshot file to allocate.\n");
4592 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4593 "# (Doesn't have to be '2' works with any number that\n"
4594 "# is not a '0' or '1')\n");
4597 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4599 if (iter->tr->allocated_snapshot)
4600 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4602 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4604 seq_puts(m, "# Snapshot commands:\n");
4605 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4606 show_snapshot_main_help(m);
4608 show_snapshot_percpu_help(m);
4611 /* Should never be called */
4612 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4615 static int s_show(struct seq_file *m, void *v)
4617 struct trace_iterator *iter = v;
4620 if (iter->ent == NULL) {
4622 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4624 test_ftrace_alive(m);
4626 if (iter->snapshot && trace_empty(iter))
4627 print_snapshot_help(m, iter);
4628 else if (iter->trace && iter->trace->print_header)
4629 iter->trace->print_header(m);
4631 trace_default_header(m);
4633 } else if (iter->leftover) {
4635 * If we filled the seq_file buffer earlier, we
4636 * want to just show it now.
4638 ret = trace_print_seq(m, &iter->seq);
4640 /* ret should this time be zero, but you never know */
4641 iter->leftover = ret;
4644 print_trace_line(iter);
4645 ret = trace_print_seq(m, &iter->seq);
4647 * If we overflow the seq_file buffer, then it will
4648 * ask us for this data again at start up.
4650 * ret is 0 if seq_file write succeeded.
4653 iter->leftover = ret;
4660 * Should be used after trace_array_get(), trace_types_lock
4661 * ensures that i_cdev was already initialized.
4663 static inline int tracing_get_cpu(struct inode *inode)
4665 if (inode->i_cdev) /* See trace_create_cpu_file() */
4666 return (long)inode->i_cdev - 1;
4667 return RING_BUFFER_ALL_CPUS;
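/*
 * Illustrative sketch (not part of the original file): the file-creation
 * side stores "cpu + 1" in i_cdev so that a NULL i_cdev still means "all
 * CPUs" here; a minimal model of that encoding, with a hypothetical
 * example_* name:
 */
static inline void *example_encode_cpu_for_i_cdev(long cpu)
{
	return (void *)(cpu + 1);	/* decoded by tracing_get_cpu() above */
}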
4670 static const struct seq_operations tracer_seq_ops = {
4677 static struct trace_iterator *
4678 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4680 struct trace_array *tr = inode->i_private;
4681 struct trace_iterator *iter;
4684 if (tracing_disabled)
4685 return ERR_PTR(-ENODEV);
4687 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4689 return ERR_PTR(-ENOMEM);
4691 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4693 if (!iter->buffer_iter)
4697 * trace_find_next_entry() may need to save off iter->ent.
4698 * It will place it into the iter->temp buffer. As most
4699 * events are less than 128, allocate a buffer of that size.
4700 * If one is greater, then trace_find_next_entry() will
4701 * allocate a new buffer to adjust for the bigger iter->ent.
4702 * It's not critical if it fails to get allocated here.
4704 iter->temp = kmalloc(128, GFP_KERNEL);
4706 iter->temp_size = 128;
4709 * trace_event_printf() may need to modify the given format
4710 * string to replace %p with %px so that it shows the real address
4711 * instead of a hash value. However, that is only needed for event
4712 * tracing; other tracers may not need it. Defer the allocation
4713 * until it is needed.
4719 * We make a copy of the current tracer to avoid concurrent
4720 * changes on it while we are reading.
4722 mutex_lock(&trace_types_lock);
4723 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4727 *iter->trace = *tr->current_trace;
4729 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4734 #ifdef CONFIG_TRACER_MAX_TRACE
4735 /* Currently only the top directory has a snapshot */
4736 if (tr->current_trace->print_max || snapshot)
4737 iter->array_buffer = &tr->max_buffer;
4740 iter->array_buffer = &tr->array_buffer;
4741 iter->snapshot = snapshot;
4743 iter->cpu_file = tracing_get_cpu(inode);
4744 mutex_init(&iter->mutex);
4746 /* Notify the tracer early; before we stop tracing. */
4747 if (iter->trace->open)
4748 iter->trace->open(iter);
4750 /* Annotate start of buffers if we had overruns */
4751 if (ring_buffer_overruns(iter->array_buffer->buffer))
4752 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4754 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4755 if (trace_clocks[tr->clock_id].in_ns)
4756 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4759 * If pause-on-trace is enabled, then stop the trace while
4760 * dumping, unless this is the "snapshot" file
4762 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4763 tracing_stop_tr(tr);
4765 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4766 for_each_tracing_cpu(cpu) {
4767 iter->buffer_iter[cpu] =
4768 ring_buffer_read_prepare(iter->array_buffer->buffer,
4771 ring_buffer_read_prepare_sync();
4772 for_each_tracing_cpu(cpu) {
4773 ring_buffer_read_start(iter->buffer_iter[cpu]);
4774 tracing_iter_reset(iter, cpu);
4777 cpu = iter->cpu_file;
4778 iter->buffer_iter[cpu] =
4779 ring_buffer_read_prepare(iter->array_buffer->buffer,
4781 ring_buffer_read_prepare_sync();
4782 ring_buffer_read_start(iter->buffer_iter[cpu]);
4783 tracing_iter_reset(iter, cpu);
4786 mutex_unlock(&trace_types_lock);
4791 mutex_unlock(&trace_types_lock);
4794 kfree(iter->buffer_iter);
4796 seq_release_private(inode, file);
4797 return ERR_PTR(-ENOMEM);
4800 int tracing_open_generic(struct inode *inode, struct file *filp)
4804 ret = tracing_check_open_get_tr(NULL);
4808 filp->private_data = inode->i_private;
4812 bool tracing_is_disabled(void)
4814 return (tracing_disabled) ? true: false;
4818 * Open and update trace_array ref count.
4819 * Must have the current trace_array passed to it.
4821 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4823 struct trace_array *tr = inode->i_private;
4826 ret = tracing_check_open_get_tr(tr);
4830 filp->private_data = inode->i_private;
4835 static int tracing_release(struct inode *inode, struct file *file)
4837 struct trace_array *tr = inode->i_private;
4838 struct seq_file *m = file->private_data;
4839 struct trace_iterator *iter;
4842 if (!(file->f_mode & FMODE_READ)) {
4843 trace_array_put(tr);
4847 /* Writes do not use seq_file */
4849 mutex_lock(&trace_types_lock);
4851 for_each_tracing_cpu(cpu) {
4852 if (iter->buffer_iter[cpu])
4853 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4856 if (iter->trace && iter->trace->close)
4857 iter->trace->close(iter);
4859 if (!iter->snapshot && tr->stop_count)
4860 /* reenable tracing if it was previously enabled */
4861 tracing_start_tr(tr);
4863 __trace_array_put(tr);
4865 mutex_unlock(&trace_types_lock);
4867 mutex_destroy(&iter->mutex);
4868 free_cpumask_var(iter->started);
4872 kfree(iter->buffer_iter);
4873 seq_release_private(inode, file);
4878 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4880 struct trace_array *tr = inode->i_private;
4882 trace_array_put(tr);
4886 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4888 struct trace_array *tr = inode->i_private;
4890 trace_array_put(tr);
4892 return single_release(inode, file);
4895 static int tracing_open(struct inode *inode, struct file *file)
4897 struct trace_array *tr = inode->i_private;
4898 struct trace_iterator *iter;
4901 ret = tracing_check_open_get_tr(tr);
4906 /* If this file was opened for write, then erase the contents */
4906 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4907 int cpu = tracing_get_cpu(inode);
4908 struct array_buffer *trace_buf = &tr->array_buffer;
4910 #ifdef CONFIG_TRACER_MAX_TRACE
4911 if (tr->current_trace->print_max)
4912 trace_buf = &tr->max_buffer;
4915 if (cpu == RING_BUFFER_ALL_CPUS)
4916 tracing_reset_online_cpus(trace_buf);
4918 tracing_reset_cpu(trace_buf, cpu);
4921 if (file->f_mode & FMODE_READ) {
4922 iter = __tracing_open(inode, file, false);
4924 ret = PTR_ERR(iter);
4925 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4926 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4930 trace_array_put(tr);
4936 * Some tracers are not suitable for instance buffers.
4937 * A tracer is always available for the global array (toplevel)
4938 * or if it explicitly states that it is.
4941 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4943 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4946 /* Find the next tracer that this trace array may use */
4947 static struct tracer *
4948 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4950 while (t && !trace_ok_for_array(t, tr))
4957 t_next(struct seq_file *m, void *v, loff_t *pos)
4959 struct trace_array *tr = m->private;
4960 struct tracer *t = v;
4965 t = get_tracer_for_array(tr, t->next);
4970 static void *t_start(struct seq_file *m, loff_t *pos)
4972 struct trace_array *tr = m->private;
4976 mutex_lock(&trace_types_lock);
4978 t = get_tracer_for_array(tr, trace_types);
4979 for (; t && l < *pos; t = t_next(m, t, &l))
4985 static void t_stop(struct seq_file *m, void *p)
4987 mutex_unlock(&trace_types_lock);
4990 static int t_show(struct seq_file *m, void *v)
4992 struct tracer *t = v;
4997 seq_puts(m, t->name);
5006 static const struct seq_operations show_traces_seq_ops = {
5013 static int show_traces_open(struct inode *inode, struct file *file)
5015 struct trace_array *tr = inode->i_private;
5019 ret = tracing_check_open_get_tr(tr);
5023 ret = seq_open(file, &show_traces_seq_ops);
5025 trace_array_put(tr);
5029 m = file->private_data;
5035 static int show_traces_release(struct inode *inode, struct file *file)
5037 struct trace_array *tr = inode->i_private;
5039 trace_array_put(tr);
5040 return seq_release(inode, file);
5044 tracing_write_stub(struct file *filp, const char __user *ubuf,
5045 size_t count, loff_t *ppos)
5050 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5054 if (file->f_mode & FMODE_READ)
5055 ret = seq_lseek(file, offset, whence);
5057 file->f_pos = ret = 0;
5062 static const struct file_operations tracing_fops = {
5063 .open = tracing_open,
5065 .write = tracing_write_stub,
5066 .llseek = tracing_lseek,
5067 .release = tracing_release,
5070 static const struct file_operations show_traces_fops = {
5071 .open = show_traces_open,
5073 .llseek = seq_lseek,
5074 .release = show_traces_release,
5078 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5079 size_t count, loff_t *ppos)
5081 struct trace_array *tr = file_inode(filp)->i_private;
5085 len = snprintf(NULL, 0, "%*pb\n",
5086 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5087 mask_str = kmalloc(len, GFP_KERNEL);
5091 len = snprintf(mask_str, len, "%*pb\n",
5092 cpumask_pr_args(tr->tracing_cpumask));
5097 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5105 int tracing_set_cpumask(struct trace_array *tr,
5106 cpumask_var_t tracing_cpumask_new)
5113 local_irq_disable();
5114 arch_spin_lock(&tr->max_lock);
5115 for_each_tracing_cpu(cpu) {
5117 * Increase/decrease the disabled counter if we are
5118 * about to flip a bit in the cpumask:
5120 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5121 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5122 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5123 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5125 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5126 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5127 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5128 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5131 arch_spin_unlock(&tr->max_lock);
5134 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5140 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5141 size_t count, loff_t *ppos)
5143 struct trace_array *tr = file_inode(filp)->i_private;
5144 cpumask_var_t tracing_cpumask_new;
5147 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5150 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5154 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5158 free_cpumask_var(tracing_cpumask_new);
5163 free_cpumask_var(tracing_cpumask_new);
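/*
 * Illustrative usage: tracing_cpumask takes a hex cpumask, e.g. to limit
 * tracing to CPUs 0 and 1 (CPUs cleared from the mask have their ring
 * buffer recording disabled, as done in tracing_set_cpumask() above):
 *
 *   echo 3 > tracing_cpumask
 */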
5168 static const struct file_operations tracing_cpumask_fops = {
5169 .open = tracing_open_generic_tr,
5170 .read = tracing_cpumask_read,
5171 .write = tracing_cpumask_write,
5172 .release = tracing_release_generic_tr,
5173 .llseek = generic_file_llseek,
5176 static int tracing_trace_options_show(struct seq_file *m, void *v)
5178 struct tracer_opt *trace_opts;
5179 struct trace_array *tr = m->private;
5183 mutex_lock(&trace_types_lock);
5184 tracer_flags = tr->current_trace->flags->val;
5185 trace_opts = tr->current_trace->flags->opts;
5187 for (i = 0; trace_options[i]; i++) {
5188 if (tr->trace_flags & (1 << i))
5189 seq_printf(m, "%s\n", trace_options[i]);
5191 seq_printf(m, "no%s\n", trace_options[i]);
5194 for (i = 0; trace_opts[i].name; i++) {
5195 if (tracer_flags & trace_opts[i].bit)
5196 seq_printf(m, "%s\n", trace_opts[i].name);
5198 seq_printf(m, "no%s\n", trace_opts[i].name);
5200 mutex_unlock(&trace_types_lock);
5205 static int __set_tracer_option(struct trace_array *tr,
5206 struct tracer_flags *tracer_flags,
5207 struct tracer_opt *opts, int neg)
5209 struct tracer *trace = tracer_flags->trace;
5212 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5217 tracer_flags->val &= ~opts->bit;
5219 tracer_flags->val |= opts->bit;
5223 /* Try to assign a tracer specific option */
5224 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5226 struct tracer *trace = tr->current_trace;
5227 struct tracer_flags *tracer_flags = trace->flags;
5228 struct tracer_opt *opts = NULL;
5231 for (i = 0; tracer_flags->opts[i].name; i++) {
5232 opts = &tracer_flags->opts[i];
5234 if (strcmp(cmp, opts->name) == 0)
5235 return __set_tracer_option(tr, trace->flags, opts, neg);
5241 /* Some tracers require overwrite to stay enabled */
5242 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5244 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5250 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5254 if ((mask == TRACE_ITER_RECORD_TGID) ||
5255 (mask == TRACE_ITER_RECORD_CMD))
5256 lockdep_assert_held(&event_mutex);
5258 /* do nothing if flag is already set */
5259 if (!!(tr->trace_flags & mask) == !!enabled)
5262 /* Give the tracer a chance to approve the change */
5263 if (tr->current_trace->flag_changed)
5264 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5268 tr->trace_flags |= mask;
5270 tr->trace_flags &= ~mask;
5272 if (mask == TRACE_ITER_RECORD_CMD)
5273 trace_event_enable_cmd_record(enabled);
5275 if (mask == TRACE_ITER_RECORD_TGID) {
5277 tgid_map_max = pid_max;
5278 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5282 * Pairs with smp_load_acquire() in
5283 * trace_find_tgid_ptr() to ensure that if it observes
5284 * the tgid_map we just allocated then it also observes
5285 * the corresponding tgid_map_max value.
5287 smp_store_release(&tgid_map, map);
5290 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5294 trace_event_enable_tgid_record(enabled);
5297 if (mask == TRACE_ITER_EVENT_FORK)
5298 trace_event_follow_fork(tr, enabled);
5300 if (mask == TRACE_ITER_FUNC_FORK)
5301 ftrace_pid_follow_fork(tr, enabled);
5303 if (mask == TRACE_ITER_OVERWRITE) {
5304 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5305 #ifdef CONFIG_TRACER_MAX_TRACE
5306 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5310 if (mask == TRACE_ITER_PRINTK) {
5311 trace_printk_start_stop_comm(enabled);
5312 trace_printk_control(enabled);
5318 int trace_set_options(struct trace_array *tr, char *option)
5323 size_t orig_len = strlen(option);
5326 cmp = strstrip(option);
5328 len = str_has_prefix(cmp, "no");
5334 mutex_lock(&event_mutex);
5335 mutex_lock(&trace_types_lock);
5337 ret = match_string(trace_options, -1, cmp);
5338 /* If no option could be set, test the specific tracer options */
5340 ret = set_tracer_option(tr, cmp, neg);
5342 ret = set_tracer_flag(tr, 1 << ret, !neg);
5344 mutex_unlock(&trace_types_lock);
5345 mutex_unlock(&event_mutex);
5348 * If the first trailing whitespace is replaced with '\0' by strstrip,
5349 * turn it back into a space.
5351 if (orig_len > strlen(option))
5352 option[strlen(option)] = ' ';
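/*
 * Illustrative usage: options are toggled by name through the
 * trace_options file; prefixing "no" clears an option. Which names exist
 * depends on the kernel configuration and current tracer, e.g.:
 *
 *   echo print-parent   > trace_options
 *   echo noprint-parent > trace_options
 */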
5357 static void __init apply_trace_boot_options(void)
5359 char *buf = trace_boot_options_buf;
5363 option = strsep(&buf, ",");
5369 trace_set_options(&global_trace, option);
5371 /* Put back the comma to allow this to be called again */
5378 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5379 size_t cnt, loff_t *ppos)
5381 struct seq_file *m = filp->private_data;
5382 struct trace_array *tr = m->private;
5386 if (cnt >= sizeof(buf))
5389 if (copy_from_user(buf, ubuf, cnt))
5394 ret = trace_set_options(tr, buf);
5403 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5405 struct trace_array *tr = inode->i_private;
5408 ret = tracing_check_open_get_tr(tr);
5412 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5414 trace_array_put(tr);
5419 static const struct file_operations tracing_iter_fops = {
5420 .open = tracing_trace_options_open,
5422 .llseek = seq_lseek,
5423 .release = tracing_single_release_tr,
5424 .write = tracing_trace_options_write,
5427 static const char readme_msg[] =
5428 "tracing mini-HOWTO:\n\n"
5429 "# echo 0 > tracing_on : quick way to disable tracing\n"
5430 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5431 " Important files:\n"
5432 " trace\t\t\t- The static contents of the buffer\n"
5433 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5434 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5435 " current_tracer\t- function and latency tracers\n"
5436 " available_tracers\t- list of configured tracers for current_tracer\n"
5437 " error_log\t- error log for failed commands (that support it)\n"
5438 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5439 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5440 " trace_clock\t\t- change the clock used to order events\n"
5441 " local: Per cpu clock but may not be synced across CPUs\n"
5442 " global: Synced across CPUs but slows tracing down.\n"
5443 " counter: Not a clock, but just an increment\n"
5444 " uptime: Jiffy counter from time of boot\n"
5445 " perf: Same clock that perf events use\n"
5446 #ifdef CONFIG_X86_64
5447 " x86-tsc: TSC cycle counter\n"
5449 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5450 " delta: Delta difference against a buffer-wide timestamp\n"
5451 " absolute: Absolute (standalone) timestamp\n"
5452 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5453 "\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5454 " tracing_cpumask\t- Limit which CPUs to trace\n"
5455 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5456 "\t\t\t Remove sub-buffer with rmdir\n"
5457 " trace_options\t\t- Set format or modify how tracing happens\n"
5458 "\t\t\t Disable an option by prefixing 'no' to the\n"
5459 "\t\t\t option name\n"
5460 " saved_cmdlines_size\t- echo the number of commands to store in the comm-pid list\n"
5461 #ifdef CONFIG_DYNAMIC_FTRACE
5462 "\n available_filter_functions - list of functions that can be filtered on\n"
5463 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5464 "\t\t\t functions\n"
5465 "\t accepts: func_full_name or glob-matching-pattern\n"
5466 "\t modules: Can select a group via module\n"
5467 "\t Format: :mod:<module-name>\n"
5468 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5469 "\t triggers: a command to perform when function is hit\n"
5470 "\t Format: <function>:<trigger>[:count]\n"
5471 "\t trigger: traceon, traceoff\n"
5472 "\t\t enable_event:<system>:<event>\n"
5473 "\t\t disable_event:<system>:<event>\n"
5474 #ifdef CONFIG_STACKTRACE
5477 #ifdef CONFIG_TRACER_SNAPSHOT
5482 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5483 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5484 "\t The first one will disable tracing every time do_fault is hit\n"
5485 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5486 "\t The first time do_trap is hit and it disables tracing, the\n"
5487 "\t counter will decrement to 2. If tracing is already disabled,\n"
5488 "\t the counter will not decrement. It only decrements when the\n"
5489 "\t trigger did work\n"
5490 "\t To remove trigger without count:\n"
5491 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5492 "\t To remove trigger with a count:\n"
5493 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5494 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5495 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5496 "\t modules: Can select a group via module command :mod:\n"
5497 "\t Does not accept triggers\n"
5498 #endif /* CONFIG_DYNAMIC_FTRACE */
5499 #ifdef CONFIG_FUNCTION_TRACER
5500 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5502 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5505 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5506 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5507 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5508 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5510 #ifdef CONFIG_TRACER_SNAPSHOT
5511 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5512 "\t\t\t snapshot buffer. Read the contents for more\n"
5513 "\t\t\t information\n"
5515 #ifdef CONFIG_STACK_TRACER
5516 " stack_trace\t\t- Shows the max stack trace when active\n"
5517 " stack_max_size\t- Shows current max stack size that was traced\n"
5518 "\t\t\t Write into this file to reset the max size (trigger a\n"
5519 "\t\t\t new trace)\n"
5520 #ifdef CONFIG_DYNAMIC_FTRACE
5521 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5524 #endif /* CONFIG_STACK_TRACER */
5525 #ifdef CONFIG_DYNAMIC_EVENTS
5526 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5527 "\t\t\t Write into this file to define/undefine new trace events.\n"
5529 #ifdef CONFIG_KPROBE_EVENTS
5530 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5531 "\t\t\t Write into this file to define/undefine new trace events.\n"
5533 #ifdef CONFIG_UPROBE_EVENTS
5534 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5535 "\t\t\t Write into this file to define/undefine new trace events.\n"
5537 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5538 "\t accepts: event-definitions (one definition per line)\n"
5539 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5540 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5541 #ifdef CONFIG_HIST_TRIGGERS
5542 "\t s:[synthetic/]<event> <field> [<field>]\n"
5544 "\t -:[<group>/]<event>\n"
5545 #ifdef CONFIG_KPROBE_EVENTS
5546 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5547 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5549 #ifdef CONFIG_UPROBE_EVENTS
5550 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5552 "\t args: <name>=fetcharg[:type]\n"
5553 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5554 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5555 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5557 "\t $stack<index>, $stack, $retval, $comm,\n"
5559 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5560 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5561 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5562 "\t <type>\\[<array-size>\\]\n"
5563 #ifdef CONFIG_HIST_TRIGGERS
5564 "\t field: <stype> <name>;\n"
5565 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5566 "\t [unsigned] char/int/long\n"
5569 " events/\t\t- Directory containing all trace event subsystems:\n"
5570 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5571 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5572 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5574 " filter\t\t- If set, only events passing filter are traced\n"
5575 " events/<system>/<event>/\t- Directory containing control files for\n"
5577 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5578 " filter\t\t- If set, only events passing filter are traced\n"
5579 " trigger\t\t- If set, a command to perform when event is hit\n"
5580 "\t Format: <trigger>[:count][if <filter>]\n"
5581 "\t trigger: traceon, traceoff\n"
5582 "\t enable_event:<system>:<event>\n"
5583 "\t disable_event:<system>:<event>\n"
5584 #ifdef CONFIG_HIST_TRIGGERS
5585 "\t enable_hist:<system>:<event>\n"
5586 "\t disable_hist:<system>:<event>\n"
5588 #ifdef CONFIG_STACKTRACE
5591 #ifdef CONFIG_TRACER_SNAPSHOT
5594 #ifdef CONFIG_HIST_TRIGGERS
5595 "\t\t hist (see below)\n"
5597 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5598 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5599 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5600 "\t events/block/block_unplug/trigger\n"
5601 "\t The first disables tracing every time block_unplug is hit.\n"
5602 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5603 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5604 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5605 "\t Like function triggers, the counter is only decremented if it\n"
5606 "\t enabled or disabled tracing.\n"
5607 "\t To remove a trigger without a count:\n"
5608 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5609 "\t To remove a trigger with a count:\n"
5610 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5611 "\t Filters can be ignored when removing a trigger.\n"
5612 #ifdef CONFIG_HIST_TRIGGERS
5613 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5614 "\t Format: hist:keys=<field1[,field2,...]>\n"
5615 "\t [:values=<field1[,field2,...]>]\n"
5616 "\t [:sort=<field1[,field2,...]>]\n"
5617 "\t [:size=#entries]\n"
5618 "\t [:pause][:continue][:clear]\n"
5619 "\t [:name=histname1]\n"
5620 "\t [:<handler>.<action>]\n"
5621 "\t [if <filter>]\n\n"
5622 "\t When a matching event is hit, an entry is added to a hash\n"
5623 "\t table using the key(s) and value(s) named, and the value of a\n"
5624 "\t sum called 'hitcount' is incremented. Keys and values\n"
5625 "\t correspond to fields in the event's format description. Keys\n"
5626 "\t can be any field, or the special string 'stacktrace'.\n"
5627 "\t Compound keys consisting of up to two fields can be specified\n"
5628 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5629 "\t fields. Sort keys consisting of up to two fields can be\n"
5630 "\t specified using the 'sort' keyword. The sort direction can\n"
5631 "\t be modified by appending '.descending' or '.ascending' to a\n"
5632 "\t sort field. The 'size' parameter can be used to specify more\n"
5633 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5634 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5635 "\t its histogram data will be shared with other triggers of the\n"
5636 "\t same name, and trigger hits will update this common data.\n\n"
5637 "\t Reading the 'hist' file for the event will dump the hash\n"
5638 "\t table in its entirety to stdout. If there are multiple hist\n"
5639 "\t triggers attached to an event, there will be a table for each\n"
5640 "\t trigger in the output. The table displayed for a named\n"
5641 "\t trigger will be the same as any other instance having the\n"
5642 "\t same name. The default format used to display a given field\n"
5643 "\t can be modified by appending any of the following modifiers\n"
5644 "\t to the field name, as applicable:\n\n"
5645 "\t .hex display a number as a hex value\n"
5646 "\t .sym display an address as a symbol\n"
5647 "\t .sym-offset display an address as a symbol and offset\n"
5648 "\t .execname display a common_pid as a program name\n"
5649 "\t .syscall display a syscall id as a syscall name\n"
5650 "\t .log2 display log2 value rather than raw number\n"
5651 "\t .usecs display a common_timestamp in microseconds\n\n"
5652 "\t The 'pause' parameter can be used to pause an existing hist\n"
5653 "\t trigger or to start a hist trigger but not log any events\n"
5654 "\t until told to do so. 'continue' can be used to start or\n"
5655 "\t restart a paused hist trigger.\n\n"
5656 "\t The 'clear' parameter will clear the contents of a running\n"
5657 "\t hist trigger and leave its current paused/active state\n"
5659 "\t The enable_hist and disable_hist triggers can be used to\n"
5660 "\t have one event conditionally start and stop another event's\n"
5661 "\t already-attached hist trigger. The syntax is analogous to\n"
5662 "\t the enable_event and disable_event triggers.\n\n"
5663 "\t Hist trigger handlers and actions are executed whenever a\n"
5664 "\t histogram entry is added or updated. They take the form:\n\n"
5665 "\t <handler>.<action>\n\n"
5666 "\t The available handlers are:\n\n"
5667 "\t onmatch(matching.event) - invoke on addition or update\n"
5668 "\t onmax(var) - invoke if var exceeds current max\n"
5669 "\t onchange(var) - invoke action if var changes\n\n"
5670 "\t The available actions are:\n\n"
5671 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5672 "\t save(field,...) - save current event fields\n"
5673 #ifdef CONFIG_TRACER_SNAPSHOT
5674 "\t snapshot() - snapshot the trace buffer\n\n"
5676 #ifdef CONFIG_SYNTH_EVENTS
5677 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5678 "\t Write into this file to define/undefine new synthetic events.\n"
5679 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
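/*
 * Illustrative hist trigger, using the syntax documented in readme_msg
 * above (the event and field names are only an example and depend on the
 * kernel's configuration):
 *
 *   echo 'hist:keys=call_site.sym:values=bytes_req:sort=bytes_req.descending' \
 *           > events/kmem/kmalloc/trigger
 *   cat events/kmem/kmalloc/hist
 */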
5685 tracing_readme_read(struct file *filp, char __user *ubuf,
5686 size_t cnt, loff_t *ppos)
5688 return simple_read_from_buffer(ubuf, cnt, ppos,
5689 readme_msg, strlen(readme_msg));
5692 static const struct file_operations tracing_readme_fops = {
5693 .open = tracing_open_generic,
5694 .read = tracing_readme_read,
5695 .llseek = generic_file_llseek,
5698 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5702 return trace_find_tgid_ptr(pid);
5705 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5709 return trace_find_tgid_ptr(pid);
5712 static void saved_tgids_stop(struct seq_file *m, void *v)
5716 static int saved_tgids_show(struct seq_file *m, void *v)
5718 int *entry = (int *)v;
5719 int pid = entry - tgid_map;
5725 seq_printf(m, "%d %d\n", pid, tgid);
5729 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5730 .start = saved_tgids_start,
5731 .stop = saved_tgids_stop,
5732 .next = saved_tgids_next,
5733 .show = saved_tgids_show,
5736 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5740 ret = tracing_check_open_get_tr(NULL);
5744 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5748 static const struct file_operations tracing_saved_tgids_fops = {
5749 .open = tracing_saved_tgids_open,
5751 .llseek = seq_lseek,
5752 .release = seq_release,
5755 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5757 unsigned int *ptr = v;
5759 if (*pos || m->count)
5764 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5766 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5775 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5781 arch_spin_lock(&trace_cmdline_lock);
5783 v = &savedcmd->map_cmdline_to_pid[0];
5785 v = saved_cmdlines_next(m, v, &l);
5793 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5795 arch_spin_unlock(&trace_cmdline_lock);
5799 static int saved_cmdlines_show(struct seq_file *m, void *v)
5801 char buf[TASK_COMM_LEN];
5802 unsigned int *pid = v;
5804 __trace_find_cmdline(*pid, buf);
5805 seq_printf(m, "%d %s\n", *pid, buf);
5809 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5810 .start = saved_cmdlines_start,
5811 .next = saved_cmdlines_next,
5812 .stop = saved_cmdlines_stop,
5813 .show = saved_cmdlines_show,
5816 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5820 ret = tracing_check_open_get_tr(NULL);
5824 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5827 static const struct file_operations tracing_saved_cmdlines_fops = {
5828 .open = tracing_saved_cmdlines_open,
5830 .llseek = seq_lseek,
5831 .release = seq_release,
5835 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5836 size_t cnt, loff_t *ppos)
5841 arch_spin_lock(&trace_cmdline_lock);
5842 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5843 arch_spin_unlock(&trace_cmdline_lock);
5845 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5848 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5850 kfree(s->saved_cmdlines);
5851 kfree(s->map_cmdline_to_pid);
5855 static int tracing_resize_saved_cmdlines(unsigned int val)
5857 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5859 s = kmalloc(sizeof(*s), GFP_KERNEL);
5863 if (allocate_cmdlines_buffer(val, s) < 0) {
5868 arch_spin_lock(&trace_cmdline_lock);
5869 savedcmd_temp = savedcmd;
5871 arch_spin_unlock(&trace_cmdline_lock);
5872 free_saved_cmdlines_buffer(savedcmd_temp);
5878 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5879 size_t cnt, loff_t *ppos)
5884 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5888 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5889 if (!val || val > PID_MAX_DEFAULT)
5892 ret = tracing_resize_saved_cmdlines((unsigned int)val);
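/*
 * Illustrative usage: the number of saved comm<->pid entries can be grown
 * from its default by writing a new count, e.g.:
 *
 *   echo 1024 > saved_cmdlines_size
 */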
5901 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5902 .open = tracing_open_generic,
5903 .read = tracing_saved_cmdlines_size_read,
5904 .write = tracing_saved_cmdlines_size_write,
5907 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5908 static union trace_eval_map_item *
5909 update_eval_map(union trace_eval_map_item *ptr)
5911 if (!ptr->map.eval_string) {
5912 if (ptr->tail.next) {
5913 ptr = ptr->tail.next;
5914 /* Set ptr to the next real item (skip head) */
5922 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5924 union trace_eval_map_item *ptr = v;
5927 * Paranoid! If ptr points to end, we don't want to increment past it.
5928 * This really should never happen.
5931 ptr = update_eval_map(ptr);
5932 if (WARN_ON_ONCE(!ptr))
5936 ptr = update_eval_map(ptr);
5941 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5943 union trace_eval_map_item *v;
5946 mutex_lock(&trace_eval_mutex);
5948 v = trace_eval_maps;
5952 while (v && l < *pos) {
5953 v = eval_map_next(m, v, &l);
5959 static void eval_map_stop(struct seq_file *m, void *v)
5961 mutex_unlock(&trace_eval_mutex);
5964 static int eval_map_show(struct seq_file *m, void *v)
5966 union trace_eval_map_item *ptr = v;
5968 seq_printf(m, "%s %ld (%s)\n",
5969 ptr->map.eval_string, ptr->map.eval_value,
5975 static const struct seq_operations tracing_eval_map_seq_ops = {
5976 .start = eval_map_start,
5977 .next = eval_map_next,
5978 .stop = eval_map_stop,
5979 .show = eval_map_show,
5982 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5986 ret = tracing_check_open_get_tr(NULL);
5990 return seq_open(filp, &tracing_eval_map_seq_ops);
5993 static const struct file_operations tracing_eval_map_fops = {
5994 .open = tracing_eval_map_open,
5996 .llseek = seq_lseek,
5997 .release = seq_release,
6000 static inline union trace_eval_map_item *
6001 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6003 /* Return tail of array given the head */
6004 return ptr + ptr->head.length + 1;
6008 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6011 struct trace_eval_map **stop;
6012 struct trace_eval_map **map;
6013 union trace_eval_map_item *map_array;
6014 union trace_eval_map_item *ptr;
6019 * The trace_eval_maps contains the map plus a head and tail item,
6020 * where the head holds the module and length of array, and the
6021 * tail holds a pointer to the next list.
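/*
 * A rough sketch of the resulting layout for a module contributing N maps:
 *
 *   map_array[0]        head  (module pointer + length N)
 *   map_array[1..N]     the trace_eval_map entries copied from *start
 *   map_array[N+1]      tail  (zeroed; points at the next module's array
 *                              once one is chained on)
 */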
6023 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6025 pr_warn("Unable to allocate trace eval mapping\n");
6029 mutex_lock(&trace_eval_mutex);
6031 if (!trace_eval_maps)
6032 trace_eval_maps = map_array;
6034 ptr = trace_eval_maps;
6036 ptr = trace_eval_jmp_to_tail(ptr);
6037 if (!ptr->tail.next)
6039 ptr = ptr->tail.next;
6042 ptr->tail.next = map_array;
6044 map_array->head.mod = mod;
6045 map_array->head.length = len;
6048 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6049 map_array->map = **map;
6052 memset(map_array, 0, sizeof(*map_array));
6054 mutex_unlock(&trace_eval_mutex);
6057 static void trace_create_eval_file(struct dentry *d_tracer)
6059 trace_create_file("eval_map", 0444, d_tracer,
6060 NULL, &tracing_eval_map_fops);
6063 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6064 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6065 static inline void trace_insert_eval_map_file(struct module *mod,
6066 struct trace_eval_map **start, int len) { }
6067 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6069 static void trace_insert_eval_map(struct module *mod,
6070 struct trace_eval_map **start, int len)
6072 struct trace_eval_map **map;
6079 trace_event_eval_update(map, len);
6081 trace_insert_eval_map_file(mod, start, len);
6085 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6086 size_t cnt, loff_t *ppos)
6088 struct trace_array *tr = filp->private_data;
6089 char buf[MAX_TRACER_SIZE+2];
6092 mutex_lock(&trace_types_lock);
6093 r = sprintf(buf, "%s\n", tr->current_trace->name);
6094 mutex_unlock(&trace_types_lock);
6096 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6099 int tracer_init(struct tracer *t, struct trace_array *tr)
6101 tracing_reset_online_cpus(&tr->array_buffer);
6105 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6109 for_each_tracing_cpu(cpu)
6110 per_cpu_ptr(buf->data, cpu)->entries = val;
6113 #ifdef CONFIG_TRACER_MAX_TRACE
6114 /* resize @tr's buffer to the size of @size_tr's entries */
6115 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6116 struct array_buffer *size_buf, int cpu_id)
6120 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6121 for_each_tracing_cpu(cpu) {
6122 ret = ring_buffer_resize(trace_buf->buffer,
6123 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6126 per_cpu_ptr(trace_buf->data, cpu)->entries =
6127 per_cpu_ptr(size_buf->data, cpu)->entries;
6130 ret = ring_buffer_resize(trace_buf->buffer,
6131 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6133 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6134 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6139 #endif /* CONFIG_TRACER_MAX_TRACE */
6141 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6142 unsigned long size, int cpu)
6147 * If kernel or user changes the size of the ring buffer
6148 * we use the size that was given, and we can forget about
6149 * expanding it later.
6151 ring_buffer_expanded = true;
6153 /* May be called before buffers are initialized */
6154 if (!tr->array_buffer.buffer)
6157 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6161 #ifdef CONFIG_TRACER_MAX_TRACE
6162 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6163 !tr->current_trace->use_max_tr)
6166 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6168 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6169 &tr->array_buffer, cpu);
6172 * AARGH! We are left with different
6173 * size max buffer!!!!
6174 * The max buffer is our "snapshot" buffer.
6175 * When a tracer needs a snapshot (one of the
6176 * latency tracers), it swaps the max buffer
6177 * with the saved snapshot. We succeeded in
6178 * updating the size of the main buffer, but failed to
6179 * update the size of the max buffer. But when we tried
6180 * to reset the main buffer to the original size, we
6181 * failed there too. This is very unlikely to
6182 * happen, but if it does, warn and kill all
6186 tracing_disabled = 1;
6191 if (cpu == RING_BUFFER_ALL_CPUS)
6192 set_buffer_entries(&tr->max_buffer, size);
6194 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6197 #endif /* CONFIG_TRACER_MAX_TRACE */
6199 if (cpu == RING_BUFFER_ALL_CPUS)
6200 set_buffer_entries(&tr->array_buffer, size);
6202 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6207 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6208 unsigned long size, int cpu_id)
6212 mutex_lock(&trace_types_lock);
6214 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6215 /* make sure, this cpu is enabled in the mask */
6216 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6222 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6227 mutex_unlock(&trace_types_lock);
6234 * tracing_update_buffers - used by tracing facility to expand ring buffers
6236 * To save memory when tracing is never used on a system that has it
6237 * configured in, the ring buffers are set to a minimum size. Once
6238 * a user starts to use the tracing facility, the buffers need to grow
6239 * to their default size.
6241 * This function is to be called when a tracer is about to be used.
6243 int tracing_update_buffers(void)
6247 mutex_lock(&trace_types_lock);
6248 if (!ring_buffer_expanded)
6249 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6250 RING_BUFFER_ALL_CPUS);
6251 mutex_unlock(&trace_types_lock);
6256 struct trace_option_dentry;
6259 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6262 * Used to clear out the tracer before deletion of an instance.
6263 * Must have trace_types_lock held.
6265 static void tracing_set_nop(struct trace_array *tr)
6267 if (tr->current_trace == &nop_trace)
6270 tr->current_trace->enabled--;
6272 if (tr->current_trace->reset)
6273 tr->current_trace->reset(tr);
6275 tr->current_trace = &nop_trace;
6278 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6280 /* Only enable if the directory has been created already. */
6284 create_trace_option_files(tr, t);
6287 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6290 #ifdef CONFIG_TRACER_MAX_TRACE
6295 mutex_lock(&trace_types_lock);
6297 if (!ring_buffer_expanded) {
6298 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6299 RING_BUFFER_ALL_CPUS);
6305 for (t = trace_types; t; t = t->next) {
6306 if (strcmp(t->name, buf) == 0)
6313 if (t == tr->current_trace)
6316 #ifdef CONFIG_TRACER_SNAPSHOT
6317 if (t->use_max_tr) {
6318 arch_spin_lock(&tr->max_lock);
6319 if (tr->cond_snapshot)
6321 arch_spin_unlock(&tr->max_lock);
6326 /* Some tracers won't work on kernel command line */
6327 if (system_state < SYSTEM_RUNNING && t->noboot) {
6328 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6333 /* Some tracers are only allowed for the top level buffer */
6334 if (!trace_ok_for_array(t, tr)) {
6339 /* If trace pipe files are being read, we can't change the tracer */
6340 if (tr->trace_ref) {
6345 trace_branch_disable();
6347 tr->current_trace->enabled--;
6349 if (tr->current_trace->reset)
6350 tr->current_trace->reset(tr);
6352 /* Current trace needs to be nop_trace before synchronize_rcu */
6353 tr->current_trace = &nop_trace;
6355 #ifdef CONFIG_TRACER_MAX_TRACE
6356 had_max_tr = tr->allocated_snapshot;
6358 if (had_max_tr && !t->use_max_tr) {
6360 * We need to make sure that the update_max_tr sees that
6361 * current_trace changed to nop_trace to keep it from
6362 * swapping the buffers after we resize it.
6363 * update_max_tr() is called with interrupts disabled,
6364 * so a synchronize_rcu() is sufficient.
6371 #ifdef CONFIG_TRACER_MAX_TRACE
6372 if (t->use_max_tr && !had_max_tr) {
6373 ret = tracing_alloc_snapshot_instance(tr);
6380 ret = tracer_init(t, tr);
6385 tr->current_trace = t;
6386 tr->current_trace->enabled++;
6387 trace_branch_enable(tr);
6389 mutex_unlock(&trace_types_lock);
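/*
 * Illustrative usage (which tracers appear depends on the kernel
 * configuration; writes reach here via tracing_set_trace_write() below):
 *
 *   cat available_tracers           # e.g. "function_graph function nop"
 *   echo function > current_tracer
 */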
6395 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6396 size_t cnt, loff_t *ppos)
6398 struct trace_array *tr = filp->private_data;
6399 char buf[MAX_TRACER_SIZE+1];
6406 if (cnt > MAX_TRACER_SIZE)
6407 cnt = MAX_TRACER_SIZE;
6409 if (copy_from_user(buf, ubuf, cnt))
6414 /* strip ending whitespace. */
6415 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6418 err = tracing_set_tracer(tr, buf);
6428 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6429 size_t cnt, loff_t *ppos)
6434 r = snprintf(buf, sizeof(buf), "%ld\n",
6435 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6436 if (r > sizeof(buf))
6438 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6442 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6443 size_t cnt, loff_t *ppos)
6448 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6458 tracing_thresh_read(struct file *filp, char __user *ubuf,
6459 size_t cnt, loff_t *ppos)
6461 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6465 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6466 size_t cnt, loff_t *ppos)
6468 struct trace_array *tr = filp->private_data;
6471 mutex_lock(&trace_types_lock);
6472 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6476 if (tr->current_trace->update_thresh) {
6477 ret = tr->current_trace->update_thresh(tr);
6484 mutex_unlock(&trace_types_lock);
6489 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6492 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6493 size_t cnt, loff_t *ppos)
6495 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6499 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6500 size_t cnt, loff_t *ppos)
6502 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6507 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6509 struct trace_array *tr = inode->i_private;
6510 struct trace_iterator *iter;
6513 ret = tracing_check_open_get_tr(tr);
6517 mutex_lock(&trace_types_lock);
6519 /* create a buffer to store the information to pass to userspace */
6520 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6523 __trace_array_put(tr);
6527 trace_seq_init(&iter->seq);
6528 iter->trace = tr->current_trace;
6530 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6535 /* trace pipe does not show start of buffer */
6536 cpumask_setall(iter->started);
6538 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6539 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6541 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6542 if (trace_clocks[tr->clock_id].in_ns)
6543 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6546 iter->array_buffer = &tr->array_buffer;
6547 iter->cpu_file = tracing_get_cpu(inode);
6548 mutex_init(&iter->mutex);
6549 filp->private_data = iter;
6551 if (iter->trace->pipe_open)
6552 iter->trace->pipe_open(iter);
6554 nonseekable_open(inode, filp);
6558 mutex_unlock(&trace_types_lock);
6563 __trace_array_put(tr);
6564 mutex_unlock(&trace_types_lock);
6568 static int tracing_release_pipe(struct inode *inode, struct file *file)
6570 struct trace_iterator *iter = file->private_data;
6571 struct trace_array *tr = inode->i_private;
6573 mutex_lock(&trace_types_lock);
6577 if (iter->trace->pipe_close)
6578 iter->trace->pipe_close(iter);
6580 mutex_unlock(&trace_types_lock);
6582 free_cpumask_var(iter->started);
6583 mutex_destroy(&iter->mutex);
6586 trace_array_put(tr);
6592 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6594 struct trace_array *tr = iter->tr;
6596 /* Iterators are static; they should be filled or empty */
6597 if (trace_buffer_iter(iter, iter->cpu_file))
6598 return EPOLLIN | EPOLLRDNORM;
6600 if (tr->trace_flags & TRACE_ITER_BLOCK)
6602 * Always select as readable when in blocking mode
6604 return EPOLLIN | EPOLLRDNORM;
6606 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6611 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6613 struct trace_iterator *iter = filp->private_data;
6615 return trace_poll(iter, filp, poll_table);
6618 /* Must be called with iter->mutex held. */
6619 static int tracing_wait_pipe(struct file *filp)
6621 struct trace_iterator *iter = filp->private_data;
6624 while (trace_empty(iter)) {
6626 if ((filp->f_flags & O_NONBLOCK)) {
6631 * We block until we read something and tracing is disabled.
6632 * We still block if tracing is disabled, but we have never
6633 * read anything. This allows a user to cat this file, and
6634 * then enable tracing. But after we have read something,
6635 * we give an EOF when tracing is again disabled.
6637 * iter->pos will be 0 if we haven't read anything.
6639 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6642 mutex_unlock(&iter->mutex);
6644 ret = wait_on_pipe(iter, 0);
6646 mutex_lock(&iter->mutex);
6659 tracing_read_pipe(struct file *filp, char __user *ubuf,
6660 size_t cnt, loff_t *ppos)
6662 struct trace_iterator *iter = filp->private_data;
6666 * Avoid more than one consumer on a single file descriptor
6667 * This is just a matter of trace coherency; the ring buffer itself
6670 mutex_lock(&iter->mutex);
6672 /* return any leftover data */
6673 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6677 trace_seq_init(&iter->seq);
6679 if (iter->trace->read) {
6680 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6686 sret = tracing_wait_pipe(filp);
6690 /* stop when tracing is finished */
6691 if (trace_empty(iter)) {
6696 if (cnt >= PAGE_SIZE)
6697 cnt = PAGE_SIZE - 1;
6699 /* reset all but tr, trace, and overruns */
6700 memset(&iter->seq, 0,
6701 sizeof(struct trace_iterator) -
6702 offsetof(struct trace_iterator, seq));
6703 cpumask_clear(iter->started);
6704 trace_seq_init(&iter->seq);
6707 trace_event_read_lock();
6708 trace_access_lock(iter->cpu_file);
6709 while (trace_find_next_entry_inc(iter) != NULL) {
6710 enum print_line_t ret;
6711 int save_len = iter->seq.seq.len;
6713 ret = print_trace_line(iter);
6714 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6715 /* don't print partial lines */
6716 iter->seq.seq.len = save_len;
6719 if (ret != TRACE_TYPE_NO_CONSUME)
6720 trace_consume(iter);
6722 if (trace_seq_used(&iter->seq) >= cnt)
6726 * Setting the full flag means we reached the trace_seq buffer
6727 * size, and we should have left via the partial-output condition above.
6728 * If we get here, one of the trace_seq_* functions is not being used properly.
6730 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6733 trace_access_unlock(iter->cpu_file);
6734 trace_event_read_unlock();
6736 /* Now copy what we have to the user */
6737 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6738 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6739 trace_seq_init(&iter->seq);
6742 * If there was nothing to send to user, in spite of consuming trace
6743 * entries, go back to wait for more entries.
6749 mutex_unlock(&iter->mutex);
6754 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6757 __free_page(spd->pages[idx]);
6761 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6767 /* Seq buffer is page-sized, exactly what we need. */
6769 save_len = iter->seq.seq.len;
6770 ret = print_trace_line(iter);
6772 if (trace_seq_has_overflowed(&iter->seq)) {
6773 iter->seq.seq.len = save_len;
6778 * This should not be hit, because it should only
6779 * be set if the iter->seq overflowed. But check it
6780 * anyway to be safe.
6782 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6783 iter->seq.seq.len = save_len;
6787 count = trace_seq_used(&iter->seq) - save_len;
6790 iter->seq.seq.len = save_len;
6794 if (ret != TRACE_TYPE_NO_CONSUME)
6795 trace_consume(iter);
6797 if (!trace_find_next_entry_inc(iter)) {
6807 static ssize_t tracing_splice_read_pipe(struct file *filp,
6809 struct pipe_inode_info *pipe,
6813 struct page *pages_def[PIPE_DEF_BUFFERS];
6814 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6815 struct trace_iterator *iter = filp->private_data;
6816 struct splice_pipe_desc spd = {
6818 .partial = partial_def,
6819 .nr_pages = 0, /* This gets updated below. */
6820 .nr_pages_max = PIPE_DEF_BUFFERS,
6821 .ops = &default_pipe_buf_ops,
6822 .spd_release = tracing_spd_release_pipe,
6828 if (splice_grow_spd(pipe, &spd))
6831 mutex_lock(&iter->mutex);
6833 if (iter->trace->splice_read) {
6834 ret = iter->trace->splice_read(iter, filp,
6835 ppos, pipe, len, flags);
6840 ret = tracing_wait_pipe(filp);
6844 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6849 trace_event_read_lock();
6850 trace_access_lock(iter->cpu_file);
6852 /* Fill as many pages as possible. */
6853 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6854 spd.pages[i] = alloc_page(GFP_KERNEL);
6858 rem = tracing_fill_pipe_page(rem, iter);
6860 /* Copy the data into the page, so we can start over. */
6861 ret = trace_seq_to_buffer(&iter->seq,
6862 page_address(spd.pages[i]),
6863 trace_seq_used(&iter->seq));
6865 __free_page(spd.pages[i]);
6868 spd.partial[i].offset = 0;
6869 spd.partial[i].len = trace_seq_used(&iter->seq);
6871 trace_seq_init(&iter->seq);
6874 trace_access_unlock(iter->cpu_file);
6875 trace_event_read_unlock();
6876 mutex_unlock(&iter->mutex);
6881 ret = splice_to_pipe(pipe, &spd);
6885 splice_shrink_spd(&spd);
6889 mutex_unlock(&iter->mutex);
6894 tracing_entries_read(struct file *filp, char __user *ubuf,
6895 size_t cnt, loff_t *ppos)
6897 struct inode *inode = file_inode(filp);
6898 struct trace_array *tr = inode->i_private;
6899 int cpu = tracing_get_cpu(inode);
6904 mutex_lock(&trace_types_lock);
6906 if (cpu == RING_BUFFER_ALL_CPUS) {
6907 int cpu, buf_size_same;
6912 /* check if all cpu sizes are same */
6913 for_each_tracing_cpu(cpu) {
6914 /* fill in the size from first enabled cpu */
6916 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6917 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6923 if (buf_size_same) {
6924 if (!ring_buffer_expanded)
6925 r = sprintf(buf, "%lu (expanded: %lu)\n",
6927 trace_buf_size >> 10);
6929 r = sprintf(buf, "%lu\n", size >> 10);
6931 r = sprintf(buf, "X\n");
6933 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6935 mutex_unlock(&trace_types_lock);
6937 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6942 tracing_entries_write(struct file *filp, const char __user *ubuf,
6943 size_t cnt, loff_t *ppos)
6945 struct inode *inode = file_inode(filp);
6946 struct trace_array *tr = inode->i_private;
6950 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6954 /* must have at least 1 entry */
6958 /* value is in KB */
6960 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
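/*
 * Illustrative usage: buffer_size_kb takes a size in kilobytes, either for
 * all CPUs at once or for a single CPU through its per_cpu directory:
 *
 *   echo 4096 > buffer_size_kb              # resize every per-CPU buffer
 *   echo 4096 > per_cpu/cpu0/buffer_size_kb # resize only CPU 0's buffer
 */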
6970 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6971 size_t cnt, loff_t *ppos)
6973 struct trace_array *tr = filp->private_data;
6976 unsigned long size = 0, expanded_size = 0;
6978 mutex_lock(&trace_types_lock);
6979 for_each_tracing_cpu(cpu) {
6980 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6981 if (!ring_buffer_expanded)
6982 expanded_size += trace_buf_size >> 10;
6984 if (ring_buffer_expanded)
6985 r = sprintf(buf, "%lu\n", size);
6987 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6988 mutex_unlock(&trace_types_lock);
6990 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6994 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6995 size_t cnt, loff_t *ppos)
6998 * There is no need to read what the user has written; this function
6999 * is just to make sure that there is no error when "echo" is used
7008 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7010 struct trace_array *tr = inode->i_private;
7012 /* disable tracing ? */
7013 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7014 tracer_tracing_off(tr);
7015 /* resize the ring buffer to 0 */
7016 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7018 trace_array_put(tr);
7024 tracing_mark_write(struct file *filp, const char __user *ubuf,
7025 size_t cnt, loff_t *fpos)
7027 struct trace_array *tr = filp->private_data;
7028 struct ring_buffer_event *event;
7029 enum event_trigger_type tt = ETT_NONE;
7030 struct trace_buffer *buffer;
7031 struct print_entry *entry;
7036 /* Used in tracing_mark_raw_write() as well */
7037 #define FAULTED_STR "<faulted>"
7038 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7040 if (tracing_disabled)
7043 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7046 if (cnt > TRACE_BUF_SIZE)
7047 cnt = TRACE_BUF_SIZE;
7049 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7051 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7053 /* If less than "<faulted>", then make sure we can still add that */
7054 if (cnt < FAULTED_SIZE)
7055 size += FAULTED_SIZE - cnt;
7057 buffer = tr->array_buffer.buffer;
7058 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7060 if (unlikely(!event))
7061 /* Ring buffer disabled, return as if not open for write */
7064 entry = ring_buffer_event_data(event);
7065 entry->ip = _THIS_IP_;
7067 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7069 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7075 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7076 /* do not add \n before testing triggers, but add \0 */
7077 entry->buf[cnt] = '\0';
7078 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7081 if (entry->buf[cnt - 1] != '\n') {
7082 entry->buf[cnt] = '\n';
7083 entry->buf[cnt + 1] = '\0';
7085 entry->buf[cnt] = '\0';
7087 if (static_branch_unlikely(&trace_marker_exports_enabled))
7088 ftrace_exports(event, TRACE_EXPORT_MARKER);
7089 __buffer_unlock_commit(buffer, event);
7092 event_triggers_post_call(tr->trace_marker_file, tt);
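/*
 * Illustrative usage: anything written to trace_marker shows up in the
 * trace output as a print event, e.g.:
 *
 *   echo "hit the slow path" > trace_marker
 */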
7100 /* Limit it for now to 3K (including tag) */
7101 #define RAW_DATA_MAX_SIZE (1024*3)
7104 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7105 size_t cnt, loff_t *fpos)
7107 struct trace_array *tr = filp->private_data;
7108 struct ring_buffer_event *event;
7109 struct trace_buffer *buffer;
7110 struct raw_data_entry *entry;
7115 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7117 if (tracing_disabled)
7120 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7123 /* The marker must at least have a tag id */
7124 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7127 if (cnt > TRACE_BUF_SIZE)
7128 cnt = TRACE_BUF_SIZE;
7130 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7132 size = sizeof(*entry) + cnt;
7133 if (cnt < FAULT_SIZE_ID)
7134 size += FAULT_SIZE_ID - cnt;
7136 buffer = tr->array_buffer.buffer;
7137 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7140 /* Ring buffer disabled, return as if not open for write */
7143 entry = ring_buffer_event_data(event);
7145 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7148 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7153 __buffer_unlock_commit(buffer, event);
7161 static int tracing_clock_show(struct seq_file *m, void *v)
7163 struct trace_array *tr = m->private;
7166 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7168 "%s%s%s%s", i ? " " : "",
7169 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7170 i == tr->clock_id ? "]" : "");
7176 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7180 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7181 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7184 if (i == ARRAY_SIZE(trace_clocks))
7187 mutex_lock(&trace_types_lock);
7191 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7194 * New clock may not be consistent with the previous clock.
7195 * Reset the buffer so that it doesn't have incomparable timestamps.
7197 tracing_reset_online_cpus(&tr->array_buffer);
7199 #ifdef CONFIG_TRACER_MAX_TRACE
7200 if (tr->max_buffer.buffer)
7201 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7202 tracing_reset_online_cpus(&tr->max_buffer);
7205 mutex_unlock(&trace_types_lock);
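/*
 * Illustrative usage: trace_clock lists the available clocks with the
 * current one in brackets; writing a name switches clocks (and resets the
 * buffers, as noted above):
 *
 *   cat trace_clock        # e.g. "[local] global counter uptime perf"
 *   echo global > trace_clock
 */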
7210 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7211 size_t cnt, loff_t *fpos)
7213 struct seq_file *m = filp->private_data;
7214 struct trace_array *tr = m->private;
7216 const char *clockstr;
7219 if (cnt >= sizeof(buf))
7222 if (copy_from_user(buf, ubuf, cnt))
7227 clockstr = strstrip(buf);
7229 ret = tracing_set_clock(tr, clockstr);
7238 static int tracing_clock_open(struct inode *inode, struct file *file)
7240 struct trace_array *tr = inode->i_private;
7243 ret = tracing_check_open_get_tr(tr);
7247 ret = single_open(file, tracing_clock_show, inode->i_private);
7249 trace_array_put(tr);
7254 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7256 struct trace_array *tr = m->private;
7258 mutex_lock(&trace_types_lock);
7260 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7261 seq_puts(m, "delta [absolute]\n");
7263 seq_puts(m, "[delta] absolute\n");
7265 mutex_unlock(&trace_types_lock);
7270 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7272 struct trace_array *tr = inode->i_private;
7275 ret = tracing_check_open_get_tr(tr);
7279 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7281 trace_array_put(tr);
7286 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7288 if (rbe == this_cpu_read(trace_buffered_event))
7289 return ring_buffer_time_stamp(buffer);
7291 return ring_buffer_event_time_stamp(buffer, rbe);
7295 * Set or disable using the per CPU trace_buffered_event when possible.
7297 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7301 mutex_lock(&trace_types_lock);
7303 if (set && tr->no_filter_buffering_ref++)
7307 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7312 --tr->no_filter_buffering_ref;
7315 mutex_unlock(&trace_types_lock);
7320 struct ftrace_buffer_info {
7321 struct trace_iterator iter;
7323 unsigned int spare_cpu;
7327 #ifdef CONFIG_TRACER_SNAPSHOT
7328 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7330 struct trace_array *tr = inode->i_private;
7331 struct trace_iterator *iter;
7335 ret = tracing_check_open_get_tr(tr);
7339 if (file->f_mode & FMODE_READ) {
7340 iter = __tracing_open(inode, file, true);
7342 ret = PTR_ERR(iter);
7344 /* Writes still need the seq_file to hold the private data */
7346 m = kzalloc(sizeof(*m), GFP_KERNEL);
7349 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7357 iter->array_buffer = &tr->max_buffer;
7358 iter->cpu_file = tracing_get_cpu(inode);
7360 file->private_data = m;
7364 trace_array_put(tr);
7370 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7373 struct seq_file *m = filp->private_data;
7374 struct trace_iterator *iter = m->private;
7375 struct trace_array *tr = iter->tr;
7379 ret = tracing_update_buffers();
7383 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7387 mutex_lock(&trace_types_lock);
7389 if (tr->current_trace->use_max_tr) {
7394 arch_spin_lock(&tr->max_lock);
7395 if (tr->cond_snapshot)
7397 arch_spin_unlock(&tr->max_lock);
7403 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7407 if (tr->allocated_snapshot)
7411 /* Only allow per-cpu swap if the ring buffer supports it */
7412 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7413 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7418 if (tr->allocated_snapshot)
7419 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7420 &tr->array_buffer, iter->cpu_file);
7422 ret = tracing_alloc_snapshot_instance(tr);
7425 local_irq_disable();
7426 /* Now, we're going to swap */
7427 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7428 update_max_tr(tr, current, smp_processor_id(), NULL);
7430 update_max_tr_single(tr, current, iter->cpu_file);
7434 if (tr->allocated_snapshot) {
7435 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7436 tracing_reset_online_cpus(&tr->max_buffer);
7438 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7448 mutex_unlock(&trace_types_lock);
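/*
 * Usage sketch (illustrative; the value semantics follow the val handling
 * above and Documentation/trace/ftrace.rst): writing "1" allocates the
 * spare buffer if needed and swaps it with the live buffer, "0" frees it
 * again, and any other value just clears its contents.  A hypothetical
 * user-space caller:
 *
 *	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *	if (fd >= 0)
 *		write(fd, "1", 1);	// take a snapshot now
 */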
7452 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7454 struct seq_file *m = file->private_data;
7457 ret = tracing_release(inode, file);
7459 if (file->f_mode & FMODE_READ)
7462 /* If write only, the seq_file is just a stub */
7470 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7471 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7472 size_t count, loff_t *ppos);
7473 static int tracing_buffers_release(struct inode *inode, struct file *file);
7474 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7475 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7477 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7479 struct ftrace_buffer_info *info;
7482 /* The following checks for tracefs lockdown */
7483 ret = tracing_buffers_open(inode, filp);
7487 info = filp->private_data;
7489 if (info->iter.trace->use_max_tr) {
7490 tracing_buffers_release(inode, filp);
7494 info->iter.snapshot = true;
7495 info->iter.array_buffer = &info->iter.tr->max_buffer;
7500 #endif /* CONFIG_TRACER_SNAPSHOT */
7503 static const struct file_operations tracing_thresh_fops = {
7504 .open = tracing_open_generic,
7505 .read = tracing_thresh_read,
7506 .write = tracing_thresh_write,
7507 .llseek = generic_file_llseek,
7510 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7511 static const struct file_operations tracing_max_lat_fops = {
7512 .open = tracing_open_generic,
7513 .read = tracing_max_lat_read,
7514 .write = tracing_max_lat_write,
7515 .llseek = generic_file_llseek,
7519 static const struct file_operations set_tracer_fops = {
7520 .open = tracing_open_generic,
7521 .read = tracing_set_trace_read,
7522 .write = tracing_set_trace_write,
7523 .llseek = generic_file_llseek,
7526 static const struct file_operations tracing_pipe_fops = {
7527 .open = tracing_open_pipe,
7528 .poll = tracing_poll_pipe,
7529 .read = tracing_read_pipe,
7530 .splice_read = tracing_splice_read_pipe,
7531 .release = tracing_release_pipe,
7532 .llseek = no_llseek,
7535 static const struct file_operations tracing_entries_fops = {
7536 .open = tracing_open_generic_tr,
7537 .read = tracing_entries_read,
7538 .write = tracing_entries_write,
7539 .llseek = generic_file_llseek,
7540 .release = tracing_release_generic_tr,
7543 static const struct file_operations tracing_total_entries_fops = {
7544 .open = tracing_open_generic_tr,
7545 .read = tracing_total_entries_read,
7546 .llseek = generic_file_llseek,
7547 .release = tracing_release_generic_tr,
7550 static const struct file_operations tracing_free_buffer_fops = {
7551 .open = tracing_open_generic_tr,
7552 .write = tracing_free_buffer_write,
7553 .release = tracing_free_buffer_release,
7556 static const struct file_operations tracing_mark_fops = {
7557 .open = tracing_open_generic_tr,
7558 .write = tracing_mark_write,
7559 .llseek = generic_file_llseek,
7560 .release = tracing_release_generic_tr,
7563 static const struct file_operations tracing_mark_raw_fops = {
7564 .open = tracing_open_generic_tr,
7565 .write = tracing_mark_raw_write,
7566 .llseek = generic_file_llseek,
7567 .release = tracing_release_generic_tr,
7570 static const struct file_operations trace_clock_fops = {
7571 .open = tracing_clock_open,
7573 .llseek = seq_lseek,
7574 .release = tracing_single_release_tr,
7575 .write = tracing_clock_write,
7578 static const struct file_operations trace_time_stamp_mode_fops = {
7579 .open = tracing_time_stamp_mode_open,
7581 .llseek = seq_lseek,
7582 .release = tracing_single_release_tr,
7585 #ifdef CONFIG_TRACER_SNAPSHOT
7586 static const struct file_operations snapshot_fops = {
7587 .open = tracing_snapshot_open,
7589 .write = tracing_snapshot_write,
7590 .llseek = tracing_lseek,
7591 .release = tracing_snapshot_release,
7594 static const struct file_operations snapshot_raw_fops = {
7595 .open = snapshot_raw_open,
7596 .read = tracing_buffers_read,
7597 .release = tracing_buffers_release,
7598 .splice_read = tracing_buffers_splice_read,
7599 .llseek = no_llseek,
7602 #endif /* CONFIG_TRACER_SNAPSHOT */
7605 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7606 * @filp: The active open file structure
7607 * @ubuf: The userspace provided buffer holding the value to write
7608 * @cnt: The number of bytes to read from @ubuf
7609 * @ppos: The current "file" position
7611 * This function implements the write interface for a struct trace_min_max_param.
7612 * The filp->private_data must point to a trace_min_max_param structure that
7613 * defines where to write the value, the min and the max acceptable values,
7614 * and a lock to protect the write.
7617 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7619 struct trace_min_max_param *param = filp->private_data;
7626 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7631 mutex_lock(param->lock);
7633 if (param->min && val < *param->min)
7636 if (param->max && val > *param->max)
7643 mutex_unlock(param->lock);
7652 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7653 * @filp: The active open file structure
7654 * @ubuf: The userspace provided buffer to read value into
7655 * @cnt: The maximum number of bytes to read
7656 * @ppos: The current "file" position
7658 * This function implements the read interface for a struct trace_min_max_param.
7659 * The filp->private_data must point to a trace_min_max_param struct with valid
7663 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7665 struct trace_min_max_param *param = filp->private_data;
7666 char buf[U64_STR_SIZE];
7675 if (cnt > sizeof(buf))
7678 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7680 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7683 const struct file_operations trace_min_max_fops = {
7684 .open = tracing_open_generic,
7685 .read = trace_min_max_read,
7686 .write = trace_min_max_write,
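/*
 * Usage sketch (hypothetical caller; field names follow struct
 * trace_min_max_param as declared in trace.h): a tracer wanting a bounded
 * u64 knob wires its value and limits into a param and exposes it with
 * trace_min_max_fops:
 *
 *	static u64 my_val, my_min, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock = &my_lock,
 *		.val  = &my_val,
 *		.min  = &my_min,
 *		.max  = &my_max,
 *	};
 *
 *	trace_create_file("my_knob", 0644, d_tracer, &my_param,
 *			  &trace_min_max_fops);
 */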
7689 #define TRACING_LOG_ERRS_MAX 8
7690 #define TRACING_LOG_LOC_MAX 128
7692 #define CMD_PREFIX " Command: "
7695 const char **errs; /* ptr to loc-specific array of err strings */
7696 u8 type; /* index into errs -> specific err string */
7697 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7701 struct tracing_log_err {
7702 struct list_head list;
7703 struct err_info info;
7704 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7705 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7708 static DEFINE_MUTEX(tracing_err_log_lock);
7710 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7712 struct tracing_log_err *err;
7714 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7715 err = kzalloc(sizeof(*err), GFP_KERNEL);
7717 err = ERR_PTR(-ENOMEM);
7718 tr->n_err_log_entries++;
7723 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7724 list_del(&err->list);
7730 * err_pos - find the position of a string within a command for error careting
7731 * @cmd: The tracing command that caused the error
7732 * @str: The string to position the caret at within @cmd
7734 * Finds the position of the first occurrence of @str within @cmd. The
7735 * return value can be passed to tracing_log_err() for caret placement
7738 * Returns the index within @cmd of the first occurrence of @str or 0
7739 * if @str was not found.
7741 unsigned int err_pos(char *cmd, const char *str)
7745 if (WARN_ON(!strlen(cmd)))
7748 found = strstr(cmd, str);
7756 * tracing_log_err - write an error to the tracing error log
7757 * @tr: The associated trace array for the error (NULL for top level array)
7758 * @loc: A string describing where the error occurred
7759 * @cmd: The tracing command that caused the error
7760 * @errs: The array of loc-specific static error strings
7761 * @type: The index into errs[], which produces the specific static err string
7762 * @pos: The position the caret should be placed in the cmd
7764 * Writes an error into tracing/error_log of the form:
7766 * <loc>: error: <text>
7770 * tracing/error_log is a small log file containing the last
7771 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7772 * unless there has been a tracing error, and the error log can be
7773 * cleared and have its memory freed by writing the empty string in
7774 * truncation mode to it i.e. echo > tracing/error_log.
7776 * NOTE: the @errs array along with the @type param are used to
7777 * produce a static error string - this string is not copied and saved
7778 * when the error is logged - only a pointer to it is saved. See
7779 * existing callers for examples of how static strings are typically
7780 * defined for use with tracing_log_err().
7782 void tracing_log_err(struct trace_array *tr,
7783 const char *loc, const char *cmd,
7784 const char **errs, u8 type, u8 pos)
7786 struct tracing_log_err *err;
7791 mutex_lock(&tracing_err_log_lock);
7792 err = get_tracing_log_err(tr);
7793 if (PTR_ERR(err) == -ENOMEM) {
7794 mutex_unlock(&tracing_err_log_lock);
7798 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7799 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7801 err->info.errs = errs;
7802 err->info.type = type;
7803 err->info.pos = pos;
7804 err->info.ts = local_clock();
7806 list_add_tail(&err->list, &tr->err_log);
7807 mutex_unlock(&tracing_err_log_lock);
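/*
 * Usage sketch (hypothetical caller): callers keep a static array of error
 * strings and pass an index into it plus a caret position, e.g.:
 *
 *	static const char *my_errs[] = { "Duplicate field", "Bad operator" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd, my_errs,
 *			1, err_pos(cmd, bad_token));
 *
 * which appends something like the following to tracing/error_log:
 *
 *	[   12.345678] hist:sched:sched_switch: error: Bad operator
 *	  Command: <cmd>
 *	               ^
 */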
7810 static void clear_tracing_err_log(struct trace_array *tr)
7812 struct tracing_log_err *err, *next;
7814 mutex_lock(&tracing_err_log_lock);
7815 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7816 list_del(&err->list);
7820 tr->n_err_log_entries = 0;
7821 mutex_unlock(&tracing_err_log_lock);
7824 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7826 struct trace_array *tr = m->private;
7828 mutex_lock(&tracing_err_log_lock);
7830 return seq_list_start(&tr->err_log, *pos);
7833 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7835 struct trace_array *tr = m->private;
7837 return seq_list_next(v, &tr->err_log, pos);
7840 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7842 mutex_unlock(&tracing_err_log_lock);
7845 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7849 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7851 for (i = 0; i < pos; i++)
7856 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7858 struct tracing_log_err *err = v;
7861 const char *err_text = err->info.errs[err->info.type];
7862 u64 sec = err->info.ts;
7865 nsec = do_div(sec, NSEC_PER_SEC);
7866 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7867 err->loc, err_text);
7868 seq_printf(m, "%s", err->cmd);
7869 tracing_err_log_show_pos(m, err->info.pos);
7875 static const struct seq_operations tracing_err_log_seq_ops = {
7876 .start = tracing_err_log_seq_start,
7877 .next = tracing_err_log_seq_next,
7878 .stop = tracing_err_log_seq_stop,
7879 .show = tracing_err_log_seq_show
7882 static int tracing_err_log_open(struct inode *inode, struct file *file)
7884 struct trace_array *tr = inode->i_private;
7887 ret = tracing_check_open_get_tr(tr);
7891 /* If this file was opened for write, then erase contents */
7892 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7893 clear_tracing_err_log(tr);
7895 if (file->f_mode & FMODE_READ) {
7896 ret = seq_open(file, &tracing_err_log_seq_ops);
7898 struct seq_file *m = file->private_data;
7901 trace_array_put(tr);
7907 static ssize_t tracing_err_log_write(struct file *file,
7908 const char __user *buffer,
7909 size_t count, loff_t *ppos)
7914 static int tracing_err_log_release(struct inode *inode, struct file *file)
7916 struct trace_array *tr = inode->i_private;
7918 trace_array_put(tr);
7920 if (file->f_mode & FMODE_READ)
7921 seq_release(inode, file);
7926 static const struct file_operations tracing_err_log_fops = {
7927 .open = tracing_err_log_open,
7928 .write = tracing_err_log_write,
7930 .llseek = seq_lseek,
7931 .release = tracing_err_log_release,
7934 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7936 struct trace_array *tr = inode->i_private;
7937 struct ftrace_buffer_info *info;
7940 ret = tracing_check_open_get_tr(tr);
7944 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7946 trace_array_put(tr);
7950 mutex_lock(&trace_types_lock);
7953 info->iter.cpu_file = tracing_get_cpu(inode);
7954 info->iter.trace = tr->current_trace;
7955 info->iter.array_buffer = &tr->array_buffer;
7957 /* Force reading ring buffer for first read */
7958 info->read = (unsigned int)-1;
7960 filp->private_data = info;
7964 mutex_unlock(&trace_types_lock);
7966 ret = nonseekable_open(inode, filp);
7968 trace_array_put(tr);
7974 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7976 struct ftrace_buffer_info *info = filp->private_data;
7977 struct trace_iterator *iter = &info->iter;
7979 return trace_poll(iter, filp, poll_table);
7983 tracing_buffers_read(struct file *filp, char __user *ubuf,
7984 size_t count, loff_t *ppos)
7986 struct ftrace_buffer_info *info = filp->private_data;
7987 struct trace_iterator *iter = &info->iter;
7994 #ifdef CONFIG_TRACER_MAX_TRACE
7995 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8000 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8002 if (IS_ERR(info->spare)) {
8003 ret = PTR_ERR(info->spare);
8006 info->spare_cpu = iter->cpu_file;
8012 /* Do we have previous read data to read? */
8013 if (info->read < PAGE_SIZE)
8017 trace_access_lock(iter->cpu_file);
8018 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8022 trace_access_unlock(iter->cpu_file);
8025 if (trace_empty(iter)) {
8026 if ((filp->f_flags & O_NONBLOCK))
8029 ret = wait_on_pipe(iter, 0);
8040 size = PAGE_SIZE - info->read;
8044 ret = copy_to_user(ubuf, info->spare + info->read, size);
8056 static int tracing_buffers_release(struct inode *inode, struct file *file)
8058 struct ftrace_buffer_info *info = file->private_data;
8059 struct trace_iterator *iter = &info->iter;
8061 mutex_lock(&trace_types_lock);
8063 iter->tr->trace_ref--;
8065 __trace_array_put(iter->tr);
8068 ring_buffer_free_read_page(iter->array_buffer->buffer,
8069 info->spare_cpu, info->spare);
8072 mutex_unlock(&trace_types_lock);
8078 struct trace_buffer *buffer;
8081 refcount_t refcount;
8084 static void buffer_ref_release(struct buffer_ref *ref)
8086 if (!refcount_dec_and_test(&ref->refcount))
8088 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8092 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8093 struct pipe_buffer *buf)
8095 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8097 buffer_ref_release(ref);
8101 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8102 struct pipe_buffer *buf)
8104 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8106 if (refcount_read(&ref->refcount) > INT_MAX/2)
8109 refcount_inc(&ref->refcount);
8113 /* Pipe buffer operations for a buffer. */
8114 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8115 .release = buffer_pipe_buf_release,
8116 .get = buffer_pipe_buf_get,
8120 * Callback from splice_to_pipe(), if we need to release some pages
8121 * at the end of the spd in case we error'ed out in filling the pipe.
8123 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8125 struct buffer_ref *ref =
8126 (struct buffer_ref *)spd->partial[i].private;
8128 buffer_ref_release(ref);
8129 spd->partial[i].private = 0;
8133 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8134 struct pipe_inode_info *pipe, size_t len,
8137 struct ftrace_buffer_info *info = file->private_data;
8138 struct trace_iterator *iter = &info->iter;
8139 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8140 struct page *pages_def[PIPE_DEF_BUFFERS];
8141 struct splice_pipe_desc spd = {
8143 .partial = partial_def,
8144 .nr_pages_max = PIPE_DEF_BUFFERS,
8145 .ops = &buffer_pipe_buf_ops,
8146 .spd_release = buffer_spd_release,
8148 struct buffer_ref *ref;
8152 #ifdef CONFIG_TRACER_MAX_TRACE
8153 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8157 if (*ppos & (PAGE_SIZE - 1))
8160 if (len & (PAGE_SIZE - 1)) {
8161 if (len < PAGE_SIZE)
8166 if (splice_grow_spd(pipe, &spd))
8170 trace_access_lock(iter->cpu_file);
8171 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8173 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8177 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8183 refcount_set(&ref->refcount, 1);
8184 ref->buffer = iter->array_buffer->buffer;
8185 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8186 if (IS_ERR(ref->page)) {
8187 ret = PTR_ERR(ref->page);
8192 ref->cpu = iter->cpu_file;
8194 r = ring_buffer_read_page(ref->buffer, &ref->page,
8195 len, iter->cpu_file, 1);
8197 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8203 page = virt_to_page(ref->page);
8205 spd.pages[i] = page;
8206 spd.partial[i].len = PAGE_SIZE;
8207 spd.partial[i].offset = 0;
8208 spd.partial[i].private = (unsigned long)ref;
8212 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8215 trace_access_unlock(iter->cpu_file);
8218 /* did we read anything? */
8219 if (!spd.nr_pages) {
8224 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8227 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8234 ret = splice_to_pipe(pipe, &spd);
8236 splice_shrink_spd(&spd);
8241 static const struct file_operations tracing_buffers_fops = {
8242 .open = tracing_buffers_open,
8243 .read = tracing_buffers_read,
8244 .poll = tracing_buffers_poll,
8245 .release = tracing_buffers_release,
8246 .splice_read = tracing_buffers_splice_read,
8247 .llseek = no_llseek,
8251 tracing_stats_read(struct file *filp, char __user *ubuf,
8252 size_t count, loff_t *ppos)
8254 struct inode *inode = file_inode(filp);
8255 struct trace_array *tr = inode->i_private;
8256 struct array_buffer *trace_buf = &tr->array_buffer;
8257 int cpu = tracing_get_cpu(inode);
8258 struct trace_seq *s;
8260 unsigned long long t;
8261 unsigned long usec_rem;
8263 s = kmalloc(sizeof(*s), GFP_KERNEL);
8269 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8270 trace_seq_printf(s, "entries: %ld\n", cnt);
8272 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8273 trace_seq_printf(s, "overrun: %ld\n", cnt);
8275 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8276 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8278 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8279 trace_seq_printf(s, "bytes: %ld\n", cnt);
8281 if (trace_clocks[tr->clock_id].in_ns) {
8282 /* local or global for trace_clock */
8283 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8284 usec_rem = do_div(t, USEC_PER_SEC);
8285 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8288 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8289 usec_rem = do_div(t, USEC_PER_SEC);
8290 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8292 /* counter or tsc mode for trace_clock */
8293 trace_seq_printf(s, "oldest event ts: %llu\n",
8294 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8296 trace_seq_printf(s, "now ts: %llu\n",
8297 ring_buffer_time_stamp(trace_buf->buffer));
8300 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8301 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8303 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8304 trace_seq_printf(s, "read events: %ld\n", cnt);
8306 count = simple_read_from_buffer(ubuf, count, ppos,
8307 s->buffer, trace_seq_used(s));
8314 static const struct file_operations tracing_stats_fops = {
8315 .open = tracing_open_generic_tr,
8316 .read = tracing_stats_read,
8317 .llseek = generic_file_llseek,
8318 .release = tracing_release_generic_tr,
8321 #ifdef CONFIG_DYNAMIC_FTRACE
8324 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8325 size_t cnt, loff_t *ppos)
8331 /* 256 should be plenty to hold the amount needed */
8332 buf = kmalloc(256, GFP_KERNEL);
8336 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8337 ftrace_update_tot_cnt,
8338 ftrace_number_of_pages,
8339 ftrace_number_of_groups);
8341 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8346 static const struct file_operations tracing_dyn_info_fops = {
8347 .open = tracing_open_generic,
8348 .read = tracing_read_dyn_info,
8349 .llseek = generic_file_llseek,
8351 #endif /* CONFIG_DYNAMIC_FTRACE */
8353 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8355 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8356 struct trace_array *tr, struct ftrace_probe_ops *ops,
8359 tracing_snapshot_instance(tr);
8363 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8364 struct trace_array *tr, struct ftrace_probe_ops *ops,
8367 struct ftrace_func_mapper *mapper = data;
8371 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8381 tracing_snapshot_instance(tr);
8385 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8386 struct ftrace_probe_ops *ops, void *data)
8388 struct ftrace_func_mapper *mapper = data;
8391 seq_printf(m, "%ps:", (void *)ip);
8393 seq_puts(m, "snapshot");
8396 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8399 seq_printf(m, ":count=%ld\n", *count);
8401 seq_puts(m, ":unlimited\n");
8407 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8408 unsigned long ip, void *init_data, void **data)
8410 struct ftrace_func_mapper *mapper = *data;
8413 mapper = allocate_ftrace_func_mapper();
8419 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8423 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8424 unsigned long ip, void *data)
8426 struct ftrace_func_mapper *mapper = data;
8431 free_ftrace_func_mapper(mapper, NULL);
8435 ftrace_func_mapper_remove_ip(mapper, ip);
8438 static struct ftrace_probe_ops snapshot_probe_ops = {
8439 .func = ftrace_snapshot,
8440 .print = ftrace_snapshot_print,
8443 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8444 .func = ftrace_count_snapshot,
8445 .print = ftrace_snapshot_print,
8446 .init = ftrace_snapshot_init,
8447 .free = ftrace_snapshot_free,
8451 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8452 char *glob, char *cmd, char *param, int enable)
8454 struct ftrace_probe_ops *ops;
8455 void *count = (void *)-1;
8462 /* hash funcs only work with set_ftrace_filter */
8466 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8469 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8474 number = strsep(&param, ":");
8476 if (!strlen(number))
8480 * We use the callback data field (which is a pointer)
8483 ret = kstrtoul(number, 0, (unsigned long *)&count);
8488 ret = tracing_alloc_snapshot_instance(tr);
8492 ret = register_ftrace_function_probe(glob, tr, ops, count);
8495 return ret < 0 ? ret : 0;
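/*
 * Interface note: this callback implements the "snapshot" command of
 * set_ftrace_filter (see Documentation/trace/ftrace.rst).  Writing
 * "<function>:snapshot" arms an unlimited probe, "<function>:snapshot:N"
 * limits it to N hits via the count parsed above, and a leading '!'
 * removes the probe again through the unregister path above.
 */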
8498 static struct ftrace_func_command ftrace_snapshot_cmd = {
8500 .func = ftrace_trace_snapshot_callback,
8503 static __init int register_snapshot_cmd(void)
8505 return register_ftrace_command(&ftrace_snapshot_cmd);
8508 static inline __init int register_snapshot_cmd(void) { return 0; }
8509 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8511 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8513 if (WARN_ON(!tr->dir))
8514 return ERR_PTR(-ENODEV);
8516 /* Top directory uses NULL as the parent */
8517 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8520 /* All sub buffers have a descriptor */
8524 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8526 struct dentry *d_tracer;
8529 return tr->percpu_dir;
8531 d_tracer = tracing_get_dentry(tr);
8532 if (IS_ERR(d_tracer))
8535 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8537 MEM_FAIL(!tr->percpu_dir,
8538 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8540 return tr->percpu_dir;
8543 static struct dentry *
8544 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8545 void *data, long cpu, const struct file_operations *fops)
8547 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8549 if (ret) /* See tracing_get_cpu() */
8550 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8555 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8557 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8558 struct dentry *d_cpu;
8559 char cpu_dir[30]; /* 30 characters should be more than enough */
8564 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8565 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8567 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8571 /* per cpu trace_pipe */
8572 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8573 tr, cpu, &tracing_pipe_fops);
8576 trace_create_cpu_file("trace", 0644, d_cpu,
8577 tr, cpu, &tracing_fops);
8579 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8580 tr, cpu, &tracing_buffers_fops);
8582 trace_create_cpu_file("stats", 0444, d_cpu,
8583 tr, cpu, &tracing_stats_fops);
8585 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8586 tr, cpu, &tracing_entries_fops);
8588 #ifdef CONFIG_TRACER_SNAPSHOT
8589 trace_create_cpu_file("snapshot", 0644, d_cpu,
8590 tr, cpu, &snapshot_fops);
8592 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8593 tr, cpu, &snapshot_raw_fops);
8597 #ifdef CONFIG_FTRACE_SELFTEST
8598 /* Let selftest have access to static functions in this file */
8599 #include "trace_selftest.c"
8603 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8606 struct trace_option_dentry *topt = filp->private_data;
8609 if (topt->flags->val & topt->opt->bit)
8614 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8618 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8621 struct trace_option_dentry *topt = filp->private_data;
8625 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8629 if (val != 0 && val != 1)
8632 if (!!(topt->flags->val & topt->opt->bit) != val) {
8633 mutex_lock(&trace_types_lock);
8634 ret = __set_tracer_option(topt->tr, topt->flags,
8636 mutex_unlock(&trace_types_lock);
8647 static const struct file_operations trace_options_fops = {
8648 .open = tracing_open_generic,
8649 .read = trace_options_read,
8650 .write = trace_options_write,
8651 .llseek = generic_file_llseek,
8655 * In order to pass in both the trace_array descriptor as well as the index
8656 * to the flag that the trace option file represents, the trace_array
8657 * has a character array of trace_flags_index[], which holds the index
8658 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8659 * The address of this character array is passed to the flag option file
8660 * read/write callbacks.
8662 * In order to extract both the index and the trace_array descriptor,
8663 * get_tr_index() uses the following algorithm.
8667 * As the pointer itself contains the address of the index (remember
8670 * Then to get the trace_array descriptor, by subtracting that index
8671 * from the ptr, we get to the start of the index itself.
8673 * ptr - idx == &index[0]
8675 * Then a simple container_of() from that pointer gets us to the
8676 * trace_array descriptor.
8678 static void get_tr_index(void *data, struct trace_array **ptr,
8679 unsigned int *pindex)
8681 *pindex = *(unsigned char *)data;
8683 *ptr = container_of(data - *pindex, struct trace_array,
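/*
 * Worked example (values are illustrative): if a core option file was
 * created with data == &tr->trace_flags_index[5], then
 * trace_flags_index[5] == 5, so *pindex becomes 5 and
 * data - *pindex == &tr->trace_flags_index[0], from which container_of()
 * recovers tr itself.
 */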
8688 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8691 void *tr_index = filp->private_data;
8692 struct trace_array *tr;
8696 get_tr_index(tr_index, &tr, &index);
8698 if (tr->trace_flags & (1 << index))
8703 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8707 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8710 void *tr_index = filp->private_data;
8711 struct trace_array *tr;
8716 get_tr_index(tr_index, &tr, &index);
8718 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8722 if (val != 0 && val != 1)
8725 mutex_lock(&event_mutex);
8726 mutex_lock(&trace_types_lock);
8727 ret = set_tracer_flag(tr, 1 << index, val);
8728 mutex_unlock(&trace_types_lock);
8729 mutex_unlock(&event_mutex);
8739 static const struct file_operations trace_options_core_fops = {
8740 .open = tracing_open_generic,
8741 .read = trace_options_core_read,
8742 .write = trace_options_core_write,
8743 .llseek = generic_file_llseek,
8746 struct dentry *trace_create_file(const char *name,
8748 struct dentry *parent,
8750 const struct file_operations *fops)
8754 ret = tracefs_create_file(name, mode, parent, data, fops);
8756 pr_warn("Could not create tracefs '%s' entry\n", name);
8762 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8764 struct dentry *d_tracer;
8769 d_tracer = tracing_get_dentry(tr);
8770 if (IS_ERR(d_tracer))
8773 tr->options = tracefs_create_dir("options", d_tracer);
8775 pr_warn("Could not create tracefs directory 'options'\n");
8783 create_trace_option_file(struct trace_array *tr,
8784 struct trace_option_dentry *topt,
8785 struct tracer_flags *flags,
8786 struct tracer_opt *opt)
8788 struct dentry *t_options;
8790 t_options = trace_options_init_dentry(tr);
8794 topt->flags = flags;
8798 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8799 &trace_options_fops);
8804 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8806 struct trace_option_dentry *topts;
8807 struct trace_options *tr_topts;
8808 struct tracer_flags *flags;
8809 struct tracer_opt *opts;
8816 flags = tracer->flags;
8818 if (!flags || !flags->opts)
8822 * If this is an instance, only create flags for tracers
8823 * the instance may have.
8825 if (!trace_ok_for_array(tracer, tr))
8828 for (i = 0; i < tr->nr_topts; i++) {
8829 /* Make sure there are no duplicate flags. */
8830 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8836 for (cnt = 0; opts[cnt].name; cnt++)
8839 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8843 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8850 tr->topts = tr_topts;
8851 tr->topts[tr->nr_topts].tracer = tracer;
8852 tr->topts[tr->nr_topts].topts = topts;
8855 for (cnt = 0; opts[cnt].name; cnt++) {
8856 create_trace_option_file(tr, &topts[cnt], flags,
8858 MEM_FAIL(topts[cnt].entry == NULL,
8859 "Failed to create trace option: %s",
8864 static struct dentry *
8865 create_trace_option_core_file(struct trace_array *tr,
8866 const char *option, long index)
8868 struct dentry *t_options;
8870 t_options = trace_options_init_dentry(tr);
8874 return trace_create_file(option, 0644, t_options,
8875 (void *)&tr->trace_flags_index[index],
8876 &trace_options_core_fops);
8879 static void create_trace_options_dir(struct trace_array *tr)
8881 struct dentry *t_options;
8882 bool top_level = tr == &global_trace;
8885 t_options = trace_options_init_dentry(tr);
8889 for (i = 0; trace_options[i]; i++) {
8891 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8892 create_trace_option_core_file(tr, trace_options[i], i);
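/*
 * Usage note (summary): every flag in trace_options[] gets a file under
 * options/ in tracefs (e.g. options/stacktrace) that reads back "0" or "1"
 * and accepts the same values on write, which ends up in set_tracer_flag()
 * via trace_options_core_write() above.
 */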
8897 rb_simple_read(struct file *filp, char __user *ubuf,
8898 size_t cnt, loff_t *ppos)
8900 struct trace_array *tr = filp->private_data;
8904 r = tracer_tracing_is_on(tr);
8905 r = sprintf(buf, "%d\n", r);
8907 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8911 rb_simple_write(struct file *filp, const char __user *ubuf,
8912 size_t cnt, loff_t *ppos)
8914 struct trace_array *tr = filp->private_data;
8915 struct trace_buffer *buffer = tr->array_buffer.buffer;
8919 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8924 mutex_lock(&trace_types_lock);
8925 if (!!val == tracer_tracing_is_on(tr)) {
8926 val = 0; /* do nothing */
8928 tracer_tracing_on(tr);
8929 if (tr->current_trace->start)
8930 tr->current_trace->start(tr);
8932 tracer_tracing_off(tr);
8933 if (tr->current_trace->stop)
8934 tr->current_trace->stop(tr);
8936 mutex_unlock(&trace_types_lock);
8944 static const struct file_operations rb_simple_fops = {
8945 .open = tracing_open_generic_tr,
8946 .read = rb_simple_read,
8947 .write = rb_simple_write,
8948 .release = tracing_release_generic_tr,
8949 .llseek = default_llseek,
8953 buffer_percent_read(struct file *filp, char __user *ubuf,
8954 size_t cnt, loff_t *ppos)
8956 struct trace_array *tr = filp->private_data;
8960 r = tr->buffer_percent;
8961 r = sprintf(buf, "%d\n", r);
8963 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8967 buffer_percent_write(struct file *filp, const char __user *ubuf,
8968 size_t cnt, loff_t *ppos)
8970 struct trace_array *tr = filp->private_data;
8974 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8984 tr->buffer_percent = val;
8991 static const struct file_operations buffer_percent_fops = {
8992 .open = tracing_open_generic_tr,
8993 .read = buffer_percent_read,
8994 .write = buffer_percent_write,
8995 .release = tracing_release_generic_tr,
8996 .llseek = default_llseek,
8999 static struct dentry *trace_instance_dir;
9002 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9005 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9007 enum ring_buffer_flags rb_flags;
9009 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9013 buf->buffer = ring_buffer_alloc(size, rb_flags);
9017 buf->data = alloc_percpu(struct trace_array_cpu);
9019 ring_buffer_free(buf->buffer);
9024 /* Allocate the first page for all buffers */
9025 set_buffer_entries(&tr->array_buffer,
9026 ring_buffer_size(tr->array_buffer.buffer, 0));
9031 static int allocate_trace_buffers(struct trace_array *tr, int size)
9035 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9039 #ifdef CONFIG_TRACER_MAX_TRACE
9040 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9041 allocate_snapshot ? size : 1);
9042 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9043 ring_buffer_free(tr->array_buffer.buffer);
9044 tr->array_buffer.buffer = NULL;
9045 free_percpu(tr->array_buffer.data);
9046 tr->array_buffer.data = NULL;
9049 tr->allocated_snapshot = allocate_snapshot;
9052 * Only the top level trace array gets its snapshot allocated
9053 * from the kernel command line.
9055 allocate_snapshot = false;
9061 static void free_trace_buffer(struct array_buffer *buf)
9064 ring_buffer_free(buf->buffer);
9066 free_percpu(buf->data);
9071 static void free_trace_buffers(struct trace_array *tr)
9076 free_trace_buffer(&tr->array_buffer);
9078 #ifdef CONFIG_TRACER_MAX_TRACE
9079 free_trace_buffer(&tr->max_buffer);
9083 static void init_trace_flags_index(struct trace_array *tr)
9087 /* Used by the trace options files */
9088 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9089 tr->trace_flags_index[i] = i;
9092 static void __update_tracer_options(struct trace_array *tr)
9096 for (t = trace_types; t; t = t->next)
9097 add_tracer_options(tr, t);
9100 static void update_tracer_options(struct trace_array *tr)
9102 mutex_lock(&trace_types_lock);
9103 __update_tracer_options(tr);
9104 mutex_unlock(&trace_types_lock);
9107 /* Must have trace_types_lock held */
9108 struct trace_array *trace_array_find(const char *instance)
9110 struct trace_array *tr, *found = NULL;
9112 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9113 if (tr->name && strcmp(tr->name, instance) == 0) {
9122 struct trace_array *trace_array_find_get(const char *instance)
9124 struct trace_array *tr;
9126 mutex_lock(&trace_types_lock);
9127 tr = trace_array_find(instance);
9130 mutex_unlock(&trace_types_lock);
9135 static int trace_array_create_dir(struct trace_array *tr)
9139 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9143 ret = event_trace_add_tracer(tr->dir, tr);
9145 tracefs_remove(tr->dir);
9147 init_tracer_tracefs(tr, tr->dir);
9148 __update_tracer_options(tr);
9153 static struct trace_array *trace_array_create(const char *name)
9155 struct trace_array *tr;
9159 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9161 return ERR_PTR(ret);
9163 tr->name = kstrdup(name, GFP_KERNEL);
9167 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9170 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9172 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9174 raw_spin_lock_init(&tr->start_lock);
9176 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9178 tr->current_trace = &nop_trace;
9180 INIT_LIST_HEAD(&tr->systems);
9181 INIT_LIST_HEAD(&tr->events);
9182 INIT_LIST_HEAD(&tr->hist_vars);
9183 INIT_LIST_HEAD(&tr->err_log);
9185 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9188 if (ftrace_allocate_ftrace_ops(tr) < 0)
9191 ftrace_init_trace_array(tr);
9193 init_trace_flags_index(tr);
9195 if (trace_instance_dir) {
9196 ret = trace_array_create_dir(tr);
9200 __trace_early_add_events(tr);
9202 list_add(&tr->list, &ftrace_trace_arrays);
9209 ftrace_free_ftrace_ops(tr);
9210 free_trace_buffers(tr);
9211 free_cpumask_var(tr->tracing_cpumask);
9215 return ERR_PTR(ret);
9218 static int instance_mkdir(const char *name)
9220 struct trace_array *tr;
9223 mutex_lock(&event_mutex);
9224 mutex_lock(&trace_types_lock);
9227 if (trace_array_find(name))
9230 tr = trace_array_create(name);
9232 ret = PTR_ERR_OR_ZERO(tr);
9235 mutex_unlock(&trace_types_lock);
9236 mutex_unlock(&event_mutex);
9241 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9242 * @name: The name of the trace array to be looked up/created.
9244 * Returns pointer to trace array with given name.
9245 * NULL, if it cannot be created.
9247 * NOTE: This function increments the reference counter associated with the
9248 * trace array returned. This makes sure it cannot be freed while in use.
9249 * Use trace_array_put() once the trace array is no longer needed.
9250 * If the trace_array is to be freed, trace_array_destroy() needs to
9251 * be called after the trace_array_put(), or simply let user space delete
9252 * it from the tracefs instances directory. But until the
9253 * trace_array_put() is called, user space can not delete it.
9256 struct trace_array *trace_array_get_by_name(const char *name)
9258 struct trace_array *tr;
9260 mutex_lock(&event_mutex);
9261 mutex_lock(&trace_types_lock);
9263 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9264 if (tr->name && strcmp(tr->name, name) == 0)
9268 tr = trace_array_create(name);
9276 mutex_unlock(&trace_types_lock);
9277 mutex_unlock(&event_mutex);
9280 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
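/*
 * Usage sketch (hypothetical module code, mirroring the rules in the
 * comment above): look up or create an instance, drop the reference when
 * done, and only then destroy the instance if the module owns it:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("sample-instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */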
9282 static int __remove_instance(struct trace_array *tr)
9286 /* Reference counter for a newly created trace array = 1. */
9287 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9290 list_del(&tr->list);
9292 /* Disable all the flags that were enabled coming in */
9293 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9294 if ((1 << i) & ZEROED_TRACE_FLAGS)
9295 set_tracer_flag(tr, 1 << i, 0);
9298 tracing_set_nop(tr);
9299 clear_ftrace_function_probes(tr);
9300 event_trace_del_tracer(tr);
9301 ftrace_clear_pids(tr);
9302 ftrace_destroy_function_files(tr);
9303 tracefs_remove(tr->dir);
9304 free_percpu(tr->last_func_repeats);
9305 free_trace_buffers(tr);
9307 for (i = 0; i < tr->nr_topts; i++) {
9308 kfree(tr->topts[i].topts);
9312 free_cpumask_var(tr->tracing_cpumask);
9319 int trace_array_destroy(struct trace_array *this_tr)
9321 struct trace_array *tr;
9327 mutex_lock(&event_mutex);
9328 mutex_lock(&trace_types_lock);
9332 /* Making sure trace array exists before destroying it. */
9333 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9334 if (tr == this_tr) {
9335 ret = __remove_instance(tr);
9340 mutex_unlock(&trace_types_lock);
9341 mutex_unlock(&event_mutex);
9345 EXPORT_SYMBOL_GPL(trace_array_destroy);
9347 static int instance_rmdir(const char *name)
9349 struct trace_array *tr;
9352 mutex_lock(&event_mutex);
9353 mutex_lock(&trace_types_lock);
9356 tr = trace_array_find(name);
9358 ret = __remove_instance(tr);
9360 mutex_unlock(&trace_types_lock);
9361 mutex_unlock(&event_mutex);
9366 static __init void create_trace_instances(struct dentry *d_tracer)
9368 struct trace_array *tr;
9370 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9373 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9376 mutex_lock(&event_mutex);
9377 mutex_lock(&trace_types_lock);
9379 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9382 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9383 "Failed to create instance directory\n"))
9387 mutex_unlock(&trace_types_lock);
9388 mutex_unlock(&event_mutex);
9392 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9394 struct trace_event_file *file;
9397 trace_create_file("available_tracers", 0444, d_tracer,
9398 tr, &show_traces_fops);
9400 trace_create_file("current_tracer", 0644, d_tracer,
9401 tr, &set_tracer_fops);
9403 trace_create_file("tracing_cpumask", 0644, d_tracer,
9404 tr, &tracing_cpumask_fops);
9406 trace_create_file("trace_options", 0644, d_tracer,
9407 tr, &tracing_iter_fops);
9409 trace_create_file("trace", 0644, d_tracer,
9412 trace_create_file("trace_pipe", 0444, d_tracer,
9413 tr, &tracing_pipe_fops);
9415 trace_create_file("buffer_size_kb", 0644, d_tracer,
9416 tr, &tracing_entries_fops);
9418 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9419 tr, &tracing_total_entries_fops);
9421 trace_create_file("free_buffer", 0200, d_tracer,
9422 tr, &tracing_free_buffer_fops);
9424 trace_create_file("trace_marker", 0220, d_tracer,
9425 tr, &tracing_mark_fops);
9427 file = __find_event_file(tr, "ftrace", "print");
9428 if (file && file->dir)
9429 trace_create_file("trigger", 0644, file->dir, file,
9430 &event_trigger_fops);
9431 tr->trace_marker_file = file;
9433 trace_create_file("trace_marker_raw", 0220, d_tracer,
9434 tr, &tracing_mark_raw_fops);
9436 trace_create_file("trace_clock", 0644, d_tracer, tr,
9439 trace_create_file("tracing_on", 0644, d_tracer,
9440 tr, &rb_simple_fops);
9442 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9443 &trace_time_stamp_mode_fops);
9445 tr->buffer_percent = 50;
9447 trace_create_file("buffer_percent", 0444, d_tracer,
9448 tr, &buffer_percent_fops);
9450 create_trace_options_dir(tr);
9452 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9453 trace_create_maxlat_file(tr, d_tracer);
9456 if (ftrace_create_function_files(tr, d_tracer))
9457 MEM_FAIL(1, "Could not allocate function filter files");
9459 #ifdef CONFIG_TRACER_SNAPSHOT
9460 trace_create_file("snapshot", 0644, d_tracer,
9461 tr, &snapshot_fops);
9464 trace_create_file("error_log", 0644, d_tracer,
9465 tr, &tracing_err_log_fops);
9467 for_each_tracing_cpu(cpu)
9468 tracing_init_tracefs_percpu(tr, cpu);
9470 ftrace_init_tracefs(tr, d_tracer);
9473 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9475 struct vfsmount *mnt;
9476 struct file_system_type *type;
9479 * To maintain backward compatibility for tools that mount
9480 * debugfs to get to the tracing facility, tracefs is automatically
9481 * mounted to the debugfs/tracing directory.
9483 type = get_fs_type("tracefs");
9486 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9487 put_filesystem(type);
9496 * tracing_init_dentry - initialize top level trace array
9498 * This is called when creating files or directories in the tracing
9499 * directory. It is called via fs_initcall() by any of the boot up code
9500 * and makes sure the top level tracing directory is set up, returning 0 on success.
9502 int tracing_init_dentry(void)
9504 struct trace_array *tr = &global_trace;
9506 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9507 pr_warn("Tracing disabled due to lockdown\n");
9511 /* The top level trace array uses NULL as parent */
9515 if (WARN_ON(!tracefs_initialized()))
9519 * As there may still be users that expect the tracing
9520 * files to exist in debugfs/tracing, we must automount
9521 * the tracefs file system there, so older tools still
9522 * work with the newer kernel.
9524 tr->dir = debugfs_create_automount("tracing", NULL,
9525 trace_automount, NULL);
9530 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9531 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9533 static struct workqueue_struct *eval_map_wq __initdata;
9534 static struct work_struct eval_map_work __initdata;
9536 static void __init eval_map_work_func(struct work_struct *work)
9540 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9541 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9544 static int __init trace_eval_init(void)
9546 INIT_WORK(&eval_map_work, eval_map_work_func);
9548 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9550 pr_err("Unable to allocate eval_map_wq\n");
9552 eval_map_work_func(&eval_map_work);
9556 queue_work(eval_map_wq, &eval_map_work);
9560 static int __init trace_eval_sync(void)
9562 /* Make sure the eval map updates are finished */
9564 destroy_workqueue(eval_map_wq);
9568 late_initcall_sync(trace_eval_sync);
9571 #ifdef CONFIG_MODULES
9572 static void trace_module_add_evals(struct module *mod)
9574 if (!mod->num_trace_evals)
9578 * Modules with bad taint do not have events created, do
9579 * not bother with enums either.
9581 if (trace_module_has_bad_taint(mod))
9584 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9587 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9588 static void trace_module_remove_evals(struct module *mod)
9590 union trace_eval_map_item *map;
9591 union trace_eval_map_item **last = &trace_eval_maps;
9593 if (!mod->num_trace_evals)
9596 mutex_lock(&trace_eval_mutex);
9598 map = trace_eval_maps;
9601 if (map->head.mod == mod)
9603 map = trace_eval_jmp_to_tail(map);
9604 last = &map->tail.next;
9605 map = map->tail.next;
9610 *last = trace_eval_jmp_to_tail(map)->tail.next;
9613 mutex_unlock(&trace_eval_mutex);
9616 static inline void trace_module_remove_evals(struct module *mod) { }
9617 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9619 static int trace_module_notify(struct notifier_block *self,
9620 unsigned long val, void *data)
9622 struct module *mod = data;
9625 case MODULE_STATE_COMING:
9626 trace_module_add_evals(mod);
9628 case MODULE_STATE_GOING:
9629 trace_module_remove_evals(mod);
9636 static struct notifier_block trace_module_nb = {
9637 .notifier_call = trace_module_notify,
9640 #endif /* CONFIG_MODULES */
9642 static __init int tracer_init_tracefs(void)
9646 trace_access_lock_init();
9648 ret = tracing_init_dentry();
9654 init_tracer_tracefs(&global_trace, NULL);
9655 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9657 trace_create_file("tracing_thresh", 0644, NULL,
9658 &global_trace, &tracing_thresh_fops);
9660 trace_create_file("README", 0444, NULL,
9661 NULL, &tracing_readme_fops);
9663 trace_create_file("saved_cmdlines", 0444, NULL,
9664 NULL, &tracing_saved_cmdlines_fops);
9666 trace_create_file("saved_cmdlines_size", 0644, NULL,
9667 NULL, &tracing_saved_cmdlines_size_fops);
9669 trace_create_file("saved_tgids", 0444, NULL,
9670 NULL, &tracing_saved_tgids_fops);
9674 trace_create_eval_file(NULL);
9676 #ifdef CONFIG_MODULES
9677 register_module_notifier(&trace_module_nb);
9680 #ifdef CONFIG_DYNAMIC_FTRACE
9681 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9682 NULL, &tracing_dyn_info_fops);
9685 create_trace_instances(NULL);
9687 update_tracer_options(&global_trace);
9692 fs_initcall(tracer_init_tracefs);
9694 static int trace_panic_handler(struct notifier_block *this,
9695 unsigned long event, void *unused)
9697 if (ftrace_dump_on_oops)
9698 ftrace_dump(ftrace_dump_on_oops);
9702 static struct notifier_block trace_panic_notifier = {
9703 .notifier_call = trace_panic_handler,
9705 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9708 static int trace_die_handler(struct notifier_block *self,
9714 if (ftrace_dump_on_oops)
9715 ftrace_dump(ftrace_dump_on_oops);
9723 static struct notifier_block trace_die_notifier = {
9724 .notifier_call = trace_die_handler,
9729 * printk is set to max of 1024, we really don't need it that big.
9730 * Nothing should be printing 1000 characters anyway.
9732 #define TRACE_MAX_PRINT 1000
9735 * Define here KERN_TRACE so that we have one place to modify
9736 * it if we decide to change what log level the ftrace dump
9739 #define KERN_TRACE KERN_EMERG
9742 trace_printk_seq(struct trace_seq *s)
9744 /* Probably should print a warning here. */
9745 if (s->seq.len >= TRACE_MAX_PRINT)
9746 s->seq.len = TRACE_MAX_PRINT;
9749 * More paranoid code. Although the buffer size is set to
9750 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9751 * an extra layer of protection.
9753 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9754 s->seq.len = s->seq.size - 1;
9756 /* should be zero terminated, but we are paranoid. */
9757 s->buffer[s->seq.len] = 0;
9759 printk(KERN_TRACE "%s", s->buffer);
9764 void trace_init_global_iter(struct trace_iterator *iter)
9766 iter->tr = &global_trace;
9767 iter->trace = iter->tr->current_trace;
9768 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9769 iter->array_buffer = &global_trace.array_buffer;
9771 if (iter->trace && iter->trace->open)
9772 iter->trace->open(iter);
9774 /* Annotate start of buffers if we had overruns */
9775 if (ring_buffer_overruns(iter->array_buffer->buffer))
9776 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9778 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9779 if (trace_clocks[iter->tr->clock_id].in_ns)
9780 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9783 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9785 /* use static because iter can be a bit big for the stack */
9786 static struct trace_iterator iter;
9787 static atomic_t dump_running;
9788 struct trace_array *tr = &global_trace;
9789 unsigned int old_userobj;
9790 unsigned long flags;
9793 /* Only allow one dump user at a time. */
9794 if (atomic_inc_return(&dump_running) != 1) {
9795 atomic_dec(&dump_running);
9800 * Always turn off tracing when we dump.
9801 * We don't need to show trace output of what happens
9802 * between multiple crashes.
9804 * If the user does a sysrq-z, then they can re-enable
9805 * tracing with echo 1 > tracing_on.
9809 local_irq_save(flags);
9810 printk_nmi_direct_enter();
9812 /* Simulate the iterator */
9813 trace_init_global_iter(&iter);
9814 /* Can not use kmalloc for iter.temp and iter.fmt */
9815 iter.temp = static_temp_buf;
9816 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9817 iter.fmt = static_fmt_buf;
9818 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9820 for_each_tracing_cpu(cpu) {
9821 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9824 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9826 /* don't look at user memory in panic mode */
9827 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9829 switch (oops_dump_mode) {
9831 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9834 iter.cpu_file = raw_smp_processor_id();
9839 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9840 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9843 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9845 /* Did function tracer already get disabled? */
9846 if (ftrace_is_dead()) {
9847 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9848 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9852 * We need to stop all tracing on all CPUS to read
9853 * the next buffer. This is a bit expensive, but is
9854 * not done often. We fill all that we can read,
9855 * and then release the locks again.
9858 while (!trace_empty(&iter)) {
9861 printk(KERN_TRACE "---------------------------------\n");
9865 trace_iterator_reset(&iter);
9866 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9868 if (trace_find_next_entry_inc(&iter) != NULL) {
9871 ret = print_trace_line(&iter);
9872 if (ret != TRACE_TYPE_NO_CONSUME)
9873 trace_consume(&iter);
9875 touch_nmi_watchdog();
9877 trace_printk_seq(&iter.seq);
9881 printk(KERN_TRACE " (ftrace buffer empty)\n");
9883 printk(KERN_TRACE "---------------------------------\n");
9886 tr->trace_flags |= old_userobj;
9888 for_each_tracing_cpu(cpu) {
9889 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9891 atomic_dec(&dump_running);
9892 printk_nmi_direct_exit();
9893 local_irq_restore(flags);
9895 EXPORT_SYMBOL_GPL(ftrace_dump);
9897 #define WRITE_BUFSIZE 4096
9899 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9900 size_t count, loff_t *ppos,
9901 int (*createfn)(const char *))
9903 char *kbuf, *buf, *tmp;
9908 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9912 while (done < count) {
9913 size = count - done;
9915 if (size >= WRITE_BUFSIZE)
9916 size = WRITE_BUFSIZE - 1;
9918 if (copy_from_user(kbuf, buffer + done, size)) {
9925 tmp = strchr(buf, '\n');
9928 size = tmp - buf + 1;
9931 if (done + size < count) {
9934 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9935 pr_warn("Line length is too long: Should be less than %d\n",
9943 /* Remove comments */
9944 tmp = strchr(buf, '#');
9949 ret = createfn(buf);
9954 } while (done < count);
9964 __init static int tracer_alloc_buffers(void)
9970 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9971 pr_warn("Tracing disabled due to lockdown\n");
9976 * Make sure we don't accidentally add more trace options
9977 * than we have bits for.
9979 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9981 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9984 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9985 goto out_free_buffer_mask;
9987 /* Only allocate trace_printk buffers if a trace_printk exists */
9988 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9989 /* Must be called before global_trace.buffer is allocated */
9990 trace_printk_init_buffers();
9992 /* To save memory, keep the ring buffer size to its minimum */
9993 if (ring_buffer_expanded)
9994 ring_buf_size = trace_buf_size;
9998 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9999 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10001 raw_spin_lock_init(&global_trace.start_lock);
10004 * The prepare callback allocates some memory for the ring buffer. We
10005 * don't free the buffer if the CPU goes down. If we were to free
10006 * the buffer, then the user would lose any trace that was in the
10007 * buffer. The memory will be removed once the "instance" is removed.
10009 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10010 "trace/RB:preapre", trace_rb_cpu_prepare,
10013 goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();
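
	/* Honour any "trace_clock=" boot parameter before tracing starts. */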
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);
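
	/*
	 * Mark this as the top-level (global) trace array and add it to
	 * the list of all trace arrays.
	 */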
	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;
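
/* Error unwinding: release resources in the reverse order they were set up. */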
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
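
/*
 * Called early from start_kernel(): allocate the trace buffers and, when
 * "tp_printk" was given on the command line, the iterator used to pipe
 * tracepoints to printk, so that trace_printk() is usable from here on.
 */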
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is kept in an init section and
	 * will be freed after boot. This function runs at late_initcall
	 * time: if the boot tracer was never registered, clear the
	 * pointer so that a later registration does not access memory
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
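
/*
 * When the architecture's sched_clock() may be unstable, switch the
 * default trace clock from "local" to "global" once clock stability is
 * known (determined at late_initcall time).
 */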
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);