2 * ring buffer based function tracer
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/notifier.h>
21 #include <linux/irqflags.h>
22 #include <linux/debugfs.h>
23 #include <linux/tracefs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/mount.h>
36 #include <linux/rwsem.h>
37 #include <linux/slab.h>
38 #include <linux/ctype.h>
39 #include <linux/init.h>
40 #include <linux/poll.h>
41 #include <linux/nmi.h>
43 #include <linux/trace.h>
44 #include <linux/sched/clock.h>
45 #include <linux/sched/rt.h>
48 #include "trace_output.h"
51 * On boot up, the ring buffer is set to the minimum size, so that
52 * we do not waste memory on systems that are not using tracing.
54 bool ring_buffer_expanded;
57 * We need to change this state when a selftest is running.
58 * A selftest will look into the ring-buffer to count the
59 * entries inserted during the selftest, although concurrent
60 * insertions into the ring-buffer, such as trace_printk, could occur
61 * at the same time, giving false positive or negative results.
63 static bool __read_mostly tracing_selftest_running;
66 * If a tracer is running, we do not want to run SELFTEST.
68 bool __read_mostly tracing_selftest_disabled;
70 /* Pipe tracepoints to printk */
71 struct trace_iterator *tracepoint_print_iter;
72 int tracepoint_printk;
73 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
75 /* For tracers that don't implement custom flags */
76 static struct tracer_opt dummy_tracer_opt[] = {
81 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
87 * To prevent the comm cache from being overwritten when no
88 * tracing is active, only save the comm when a trace event
91 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
94 * Kill all tracing for good (never come back).
95 * It is initialized to 1 but will turn to zero if the initialization
96 * of the tracer is successful. But that is the only place that sets
99 static int tracing_disabled = 1;
101 cpumask_var_t __read_mostly tracing_buffer_mask;
104 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
106 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
107 * is set, then ftrace_dump is called. This will output the contents
108 * of the ftrace buffers to the console. This is very useful for
109 * capturing traces that lead to crashes and outputting them to a
112 * It is off by default, but you can enable it either by specifying
113 * "ftrace_dump_on_oops" on the kernel command line, or by setting
114 * /proc/sys/kernel/ftrace_dump_on_oops
115 * Set 1 if you want to dump buffers of all CPUs
116 * Set 2 if you want to dump the buffer of the CPU that triggered oops
119 enum ftrace_dump_mode ftrace_dump_on_oops;
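/*
 * Hedged sketch (illustrative only): roughly how an oops/panic notifier can
 * consume ftrace_dump_on_oops. ftrace_dump() takes the dump mode, so
 * DUMP_ALL dumps every CPU buffer and DUMP_ORIG only the CPU that triggered
 * the oops. "example_panic_notify" is not a function in this file.
 *
 *	static int example_panic_notify(struct notifier_block *nb,
 *					unsigned long event, void *unused)
 *	{
 *		if (ftrace_dump_on_oops)
 *			ftrace_dump(ftrace_dump_on_oops);
 *		return NOTIFY_OK;
 *	}
 */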
121 /* When set, tracing will stop when a WARN*() is hit */
122 int __disable_trace_on_warning;
124 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
125 /* Map of enums to their values, for "eval_map" file */
126 struct trace_eval_map_head {
128 unsigned long length;
131 union trace_eval_map_item;
133 struct trace_eval_map_tail {
135 * "end" is first and points to NULL as it must be different
136 * than "mod" or "eval_string"
138 union trace_eval_map_item *next;
139 const char *end; /* points to NULL */
142 static DEFINE_MUTEX(trace_eval_mutex);
145 * The trace_eval_maps are saved in an array with two extra elements,
146 * one at the beginning, and one at the end. The beginning item contains
147 * the count of the saved maps (head.length), and the module they
148 * belong to if not built in (head.mod). The ending item contains a
149 * pointer to the next array of saved eval_map items.
151 union trace_eval_map_item {
152 struct trace_eval_map map;
153 struct trace_eval_map_head head;
154 struct trace_eval_map_tail tail;
157 static union trace_eval_map_item *trace_eval_maps;
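/*
 * Hedged sketch of walking the saved maps, following the layout described
 * above: the head element first, then head.length map entries, then the
 * tail element whose "next" points to the following array (or NULL).
 * "handle_map" is an illustrative placeholder.
 *
 *	union trace_eval_map_item *ptr = trace_eval_maps;
 *
 *	while (ptr) {
 *		unsigned long len = ptr->head.length;
 *		unsigned long i;
 *
 *		for (i = 1; i <= len; i++)
 *			handle_map(&ptr[i].map);
 *
 *		ptr = ptr[len + 1].tail.next;
 *	}
 */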
158 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
160 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162 #define MAX_TRACER_SIZE 100
163 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
164 static char *default_bootup_tracer;
166 static bool allocate_snapshot;
168 static int __init set_cmdline_ftrace(char *str)
170 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
171 default_bootup_tracer = bootup_tracer_buf;
172 /* We are using ftrace early, expand it */
173 ring_buffer_expanded = true;
176 __setup("ftrace=", set_cmdline_ftrace);
178 static int __init set_ftrace_dump_on_oops(char *str)
180 if (*str++ != '=' || !*str) {
181 ftrace_dump_on_oops = DUMP_ALL;
185 if (!strcmp("orig_cpu", str)) {
186 ftrace_dump_on_oops = DUMP_ORIG;
192 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
194 static int __init stop_trace_on_warning(char *str)
196 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
197 __disable_trace_on_warning = 1;
200 __setup("traceoff_on_warning", stop_trace_on_warning);
202 static int __init boot_alloc_snapshot(char *str)
204 allocate_snapshot = true;
205 /* We also need the main ring buffer expanded */
206 ring_buffer_expanded = true;
209 __setup("alloc_snapshot", boot_alloc_snapshot);
212 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
214 static int __init set_trace_boot_options(char *str)
216 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
219 __setup("trace_options=", set_trace_boot_options);
221 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
222 static char *trace_boot_clock __initdata;
224 static int __init set_trace_boot_clock(char *str)
226 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
227 trace_boot_clock = trace_boot_clock_buf;
230 __setup("trace_clock=", set_trace_boot_clock);
232 static int __init set_tracepoint_printk(char *str)
234 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
235 tracepoint_printk = 1;
238 __setup("tp_printk", set_tracepoint_printk);
240 unsigned long long ns2usecs(u64 nsec)
247 /* trace_flags holds trace_options default values */
248 #define TRACE_DEFAULT_FLAGS \
249 (FUNCTION_DEFAULT_FLAGS | \
250 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
251 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
252 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
253 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
255 /* trace_options that are only supported by global_trace */
256 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
257 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
259 /* trace_flags that are default zero for instances */
260 #define ZEROED_TRACE_FLAGS \
261 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
264 * The global_trace is the descriptor that holds the top-level tracing
265 * buffers for the live tracing.
267 static struct trace_array global_trace = {
268 .trace_flags = TRACE_DEFAULT_FLAGS,
271 LIST_HEAD(ftrace_trace_arrays);
273 int trace_array_get(struct trace_array *this_tr)
275 struct trace_array *tr;
278 mutex_lock(&trace_types_lock);
279 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
286 mutex_unlock(&trace_types_lock);
291 static void __trace_array_put(struct trace_array *this_tr)
293 WARN_ON(!this_tr->ref);
297 void trace_array_put(struct trace_array *this_tr)
299 mutex_lock(&trace_types_lock);
300 __trace_array_put(this_tr);
301 mutex_unlock(&trace_types_lock);
304 int call_filter_check_discard(struct trace_event_call *call, void *rec,
305 struct ring_buffer *buffer,
306 struct ring_buffer_event *event)
308 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
309 !filter_match_preds(call->filter, rec)) {
310 __trace_event_discard_commit(buffer, event);
317 void trace_free_pid_list(struct trace_pid_list *pid_list)
319 vfree(pid_list->pids);
324 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
325 * @filtered_pids: The list of pids to check
326 * @search_pid: The PID to find in @filtered_pids
328 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
331 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
334 * If pid_max changed after filtered_pids was created, we will
335 * by default ignore all pids greater than the previous pid_max.
337 if (search_pid >= filtered_pids->pid_max)
340 return test_bit(search_pid, filtered_pids->pids);
344 * trace_ignore_this_task - should a task be ignored for tracing
345 * @filtered_pids: The list of pids to check
346 * @task: The task that should be ignored if not filtered
348 * Checks if @task should be traced or not from @filtered_pids.
349 * Returns true if @task should *NOT* be traced.
350 * Returns false if @task should be traced.
353 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
356 * Return false, because if filtered_pids does not exist,
357 * all pids are good to trace.
362 return !trace_find_filtered_pid(filtered_pids, task->pid);
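/*
 * Hedged usage sketch: a tracing hook that honours the pid filter would
 * typically bail out before recording, roughly:
 *
 *	if (trace_ignore_this_task(pid_list, current))
 *		return;
 *	... record the event for current ...
 *
 * The RCU/locking needed to safely dereference pid_list is omitted here.
 */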
366 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
367 * @pid_list: The list to modify
368 * @self: The current task for fork or NULL for exit
369 * @task: The task to add or remove
371 * When adding a task, if @self is set, the task is only added if @self
372 * is also included in @pid_list. This happens on fork and tasks should
373 * only be added when the parent is listed. If @self is NULL, then the
374 * @task pid will be removed from the list, which would happen on exit
377 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
378 struct task_struct *self,
379 struct task_struct *task)
384 /* For forks, we only add if the forking task is listed */
386 if (!trace_find_filtered_pid(pid_list, self->pid))
390 /* Sorry, but we don't support pid_max changing after setting */
391 if (task->pid >= pid_list->pid_max)
394 /* "self" is set for forks, and NULL for exits */
396 set_bit(task->pid, pid_list->pids);
398 clear_bit(task->pid, pid_list->pids);
402 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
403 * @pid_list: The pid list to show
404 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
405 * @pos: The position of the file
407 * This is used by the seq_file "next" operation to iterate the pids
408 * listed in a trace_pid_list structure.
410 * Returns the pid+1 as we want to display pid of zero, but NULL would
411 * stop the iteration.
413 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
415 unsigned long pid = (unsigned long)v;
419 /* pid is already +1 of the actual previous bit */
420 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
422 /* Return pid + 1 to allow zero to be represented */
423 if (pid < pid_list->pid_max)
424 return (void *)(pid + 1);
430 * trace_pid_start - Used for seq_file to start reading pid lists
431 * @pid_list: The pid list to show
432 * @pos: The position of the file
434 * This is used by seq_file "start" operation to start the iteration
437 * Returns the pid+1 as we want to display pid of zero, but NULL would
438 * stop the iteration.
440 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
445 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
446 if (pid >= pid_list->pid_max)
449 /* Return pid + 1 so that zero can be the exit value */
450 for (pid++; pid && l < *pos;
451 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
457 * trace_pid_show - show the current pid in seq_file processing
458 * @m: The seq_file structure to write into
459 * @v: A void pointer of the pid (+1) value to display
461 * Can be directly used by seq_file operations to display the current
464 int trace_pid_show(struct seq_file *m, void *v)
466 unsigned long pid = (unsigned long)v - 1;
468 seq_printf(m, "%lu\n", pid);
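/*
 * Hedged sketch of wiring the helpers above into seq_file operations. The
 * wrapper names, the source of "pid_list" (e.g. the file's private data),
 * and the stop callback (which would undo any locking done in start) are
 * illustrative, not taken from this file.
 *
 *	static void *example_pid_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *example_pid_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations example_pid_seq_ops = {
 *		.start	= example_pid_start,
 *		.next	= example_pid_next,
 *		.stop	= example_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */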
472 /* 128 should be much more than enough */
473 #define PID_BUF_SIZE 127
475 int trace_pid_write(struct trace_pid_list *filtered_pids,
476 struct trace_pid_list **new_pid_list,
477 const char __user *ubuf, size_t cnt)
479 struct trace_pid_list *pid_list;
480 struct trace_parser parser;
488 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
492 * Always recreate a new array. The write is an all or nothing
493 * operation. Always create a new array when adding new pids by
494 * the user. If the operation fails, then the current list is
497 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
501 pid_list->pid_max = READ_ONCE(pid_max);
503 /* Only truncating will shrink pid_max */
504 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
505 pid_list->pid_max = filtered_pids->pid_max;
507 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
508 if (!pid_list->pids) {
514 /* copy the current bits to the new max */
515 for_each_set_bit(pid, filtered_pids->pids,
516 filtered_pids->pid_max) {
517 set_bit(pid, pid_list->pids);
526 ret = trace_get_user(&parser, ubuf, cnt, &pos);
527 if (ret < 0 || !trace_parser_loaded(&parser))
535 if (kstrtoul(parser.buffer, 0, &val))
537 if (val >= pid_list->pid_max)
542 set_bit(pid, pid_list->pids);
545 trace_parser_clear(&parser);
548 trace_parser_put(&parser);
551 trace_free_pid_list(pid_list);
556 /* Cleared the list of pids */
557 trace_free_pid_list(pid_list);
562 *new_pid_list = pid_list;
567 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
571 /* Early boot up does not have a buffer yet */
573 return trace_clock_local();
575 ts = ring_buffer_time_stamp(buf->buffer, cpu);
576 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
581 u64 ftrace_now(int cpu)
583 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
587 * tracing_is_enabled - Show if global_trace has been disabled
589 * Shows if the global trace has been enabled or not. It uses the
590 * mirror flag "buffer_disabled" to be used in fast paths such as for
591 * the irqsoff tracer. But it may be inaccurate due to races. If you
592 * need to know the accurate state, use tracing_is_on() which is a little
593 * slower, but accurate.
595 int tracing_is_enabled(void)
598 * For quick access (irqsoff uses this in fast path), just
599 * return the mirror variable of the state of the ring buffer.
600 * It's a little racy, but we don't really care.
603 return !global_trace.buffer_disabled;
607 * trace_buf_size is the size in bytes that is allocated
608 * for a buffer. Note, the number of bytes is always rounded
611 * This number is purposely set to a low number of 16384.
612 * If the dump on oops happens, it is much appreciated
613 * not to have to wait for all that output. In any case, this is
614 * configurable at both boot time and run time.
616 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
618 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
620 /* trace_types holds a link list of available tracers. */
621 static struct tracer *trace_types __read_mostly;
624 * trace_types_lock is used to protect the trace_types list.
626 DEFINE_MUTEX(trace_types_lock);
629 * serialize the access of the ring buffer
631 * The ring buffer serializes readers, but that is only low level protection.
632 * The validity of the events (returned by ring_buffer_peek() etc.)
633 * is not protected by the ring buffer.
635 * The content of events may become garbage if we allow other processes to
636 * consume these events concurrently:
637 * A) the page of the consumed events may become a normal page
638 * (not a reader page) in the ring buffer, and this page will be rewritten
639 * by the events producer.
640 * B) The page of the consumed events may become a page for splice_read,
641 * and this page will be returned to the system.
643 * These primitives allow multi-process access to different cpu ring buffers.
646 * These primitives don't distinguish read-only and read-consume access.
647 * Multiple read-only accesses are also serialized.
651 static DECLARE_RWSEM(all_cpu_access_lock);
652 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654 static inline void trace_access_lock(int cpu)
656 if (cpu == RING_BUFFER_ALL_CPUS) {
657 /* gain it for accessing the whole ring buffer. */
658 down_write(&all_cpu_access_lock);
660 /* gain it for accessing a cpu ring buffer. */
662 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
663 down_read(&all_cpu_access_lock);
665 /* Secondly block other access to this @cpu ring buffer. */
666 mutex_lock(&per_cpu(cpu_access_lock, cpu));
670 static inline void trace_access_unlock(int cpu)
672 if (cpu == RING_BUFFER_ALL_CPUS) {
673 up_write(&all_cpu_access_lock);
675 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
676 up_read(&all_cpu_access_lock);
680 static inline void trace_access_lock_init(void)
684 for_each_possible_cpu(cpu)
685 mutex_init(&per_cpu(cpu_access_lock, cpu));
690 static DEFINE_MUTEX(access_lock);
692 static inline void trace_access_lock(int cpu)
695 mutex_lock(&access_lock);
698 static inline void trace_access_unlock(int cpu)
701 mutex_unlock(&access_lock);
704 static inline void trace_access_lock_init(void)
710 #ifdef CONFIG_STACKTRACE
711 static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 int skip, int pc, struct pt_regs *regs);
714 static inline void ftrace_trace_stack(struct trace_array *tr,
715 struct ring_buffer *buffer,
717 int skip, int pc, struct pt_regs *regs);
720 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 int skip, int pc, struct pt_regs *regs)
725 static inline void ftrace_trace_stack(struct trace_array *tr,
726 struct ring_buffer *buffer,
728 int skip, int pc, struct pt_regs *regs)
734 static __always_inline void
735 trace_event_setup(struct ring_buffer_event *event,
736 int type, unsigned long flags, int pc)
738 struct trace_entry *ent = ring_buffer_event_data(event);
740 tracing_generic_entry_update(ent, flags, pc);
744 static __always_inline struct ring_buffer_event *
745 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
748 unsigned long flags, int pc)
750 struct ring_buffer_event *event;
752 event = ring_buffer_lock_reserve(buffer, len);
754 trace_event_setup(event, type, flags, pc);
759 void tracer_tracing_on(struct trace_array *tr)
761 if (tr->trace_buffer.buffer)
762 ring_buffer_record_on(tr->trace_buffer.buffer);
764 * This flag is looked at when buffers haven't been allocated
765 * yet, or by some tracers (like irqsoff), that just want to
766 * know if the ring buffer has been disabled, but it can handle
767 * races where it gets disabled but we still do a record.
768 * As the check is in the fast path of the tracers, it is more
769 * important to be fast than accurate.
771 tr->buffer_disabled = 0;
772 /* Make the flag seen by readers */
777 * tracing_on - enable tracing buffers
779 * This function enables tracing buffers that may have been
780 * disabled with tracing_off.
782 void tracing_on(void)
784 tracer_tracing_on(&global_trace);
786 EXPORT_SYMBOL_GPL(tracing_on);
789 static __always_inline void
790 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792 __this_cpu_write(trace_taskinfo_save, true);
794 /* If this is the temp buffer, we need to commit fully */
795 if (this_cpu_read(trace_buffered_event) == event) {
796 /* Length is in event->array[0] */
797 ring_buffer_write(buffer, event->array[0], &event->array[1]);
798 /* Release the temp buffer */
799 this_cpu_dec(trace_buffered_event_cnt);
801 ring_buffer_unlock_commit(buffer, event);
805 * __trace_puts - write a constant string into the trace buffer.
806 * @ip: The address of the caller
807 * @str: The constant string to write
808 * @size: The size of the string.
810 int __trace_puts(unsigned long ip, const char *str, int size)
812 struct ring_buffer_event *event;
813 struct ring_buffer *buffer;
814 struct print_entry *entry;
815 unsigned long irq_flags;
819 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
822 pc = preempt_count();
824 if (unlikely(tracing_selftest_running || tracing_disabled))
827 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829 local_save_flags(irq_flags);
830 buffer = global_trace.trace_buffer.buffer;
831 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
836 entry = ring_buffer_event_data(event);
839 memcpy(&entry->buf, str, size);
841 /* Add a newline if necessary */
842 if (entry->buf[size - 1] != '\n') {
843 entry->buf[size] = '\n';
844 entry->buf[size + 1] = '\0';
846 entry->buf[size] = '\0';
848 __buffer_unlock_commit(buffer, event);
849 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
853 EXPORT_SYMBOL_GPL(__trace_puts);
856 * __trace_bputs - write the pointer to a constant string into trace buffer
857 * @ip: The address of the caller
858 * @str: The constant string to write to the buffer to
860 int __trace_bputs(unsigned long ip, const char *str)
862 struct ring_buffer_event *event;
863 struct ring_buffer *buffer;
864 struct bputs_entry *entry;
865 unsigned long irq_flags;
866 int size = sizeof(struct bputs_entry);
869 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
872 pc = preempt_count();
874 if (unlikely(tracing_selftest_running || tracing_disabled))
877 local_save_flags(irq_flags);
878 buffer = global_trace.trace_buffer.buffer;
879 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
884 entry = ring_buffer_event_data(event);
888 __buffer_unlock_commit(buffer, event);
889 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
893 EXPORT_SYMBOL_GPL(__trace_bputs);
895 #ifdef CONFIG_TRACER_SNAPSHOT
896 void tracing_snapshot_instance(struct trace_array *tr)
898 struct tracer *tracer = tr->current_trace;
902 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
903 internal_trace_puts("*** snapshot is being ignored ***\n");
907 if (!tr->allocated_snapshot) {
908 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
909 internal_trace_puts("*** stopping trace here! ***\n");
914 /* Note, snapshot can not be used when the tracer uses it */
915 if (tracer->use_max_tr) {
916 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
917 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
921 local_irq_save(flags);
922 update_max_tr(tr, current, smp_processor_id());
923 local_irq_restore(flags);
927 * tracing_snapshot - take a snapshot of the current buffer.
929 * This causes a swap between the snapshot buffer and the current live
930 * tracing buffer. You can use this to take snapshots of the live
931 * trace when some condition is triggered, but continue to trace.
933 * Note, make sure to allocate the snapshot with either
934 * a tracing_snapshot_alloc(), or by doing it manually
935 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 * If the snapshot buffer is not allocated, it will stop tracing,
938 * basically making a permanent snapshot.
940 void tracing_snapshot(void)
942 struct trace_array *tr = &global_trace;
944 tracing_snapshot_instance(tr);
946 EXPORT_SYMBOL_GPL(tracing_snapshot);
948 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
949 struct trace_buffer *size_buf, int cpu_id);
950 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952 int tracing_alloc_snapshot_instance(struct trace_array *tr)
956 if (!tr->allocated_snapshot) {
958 /* allocate spare buffer */
959 ret = resize_buffer_duplicate_size(&tr->max_buffer,
960 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
964 tr->allocated_snapshot = true;
970 static void free_snapshot(struct trace_array *tr)
973 * We don't free the ring buffer; instead, we resize it because
974 * the max_tr ring buffer has some state (e.g. ring->clock) and
975 * we want to preserve it.
977 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
978 set_buffer_entries(&tr->max_buffer, 1);
979 tracing_reset_online_cpus(&tr->max_buffer);
980 tr->allocated_snapshot = false;
984 * tracing_alloc_snapshot - allocate snapshot buffer.
986 * This only allocates the snapshot buffer if it isn't already
987 * allocated - it doesn't also take a snapshot.
989 * This is meant to be used in cases where the snapshot buffer needs
990 * to be set up for events that can't sleep but need to be able to
991 * trigger a snapshot.
993 int tracing_alloc_snapshot(void)
995 struct trace_array *tr = &global_trace;
998 ret = tracing_alloc_snapshot_instance(tr);
1003 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1006 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 * This is similar to tracing_snapshot(), but it will allocate the
1009 * snapshot buffer if it isn't already allocated. Use this only
1010 * where it is safe to sleep, as the allocation may sleep.
1012 * This causes a swap between the snapshot buffer and the current live
1013 * tracing buffer. You can use this to take snapshots of the live
1014 * trace when some condition is triggered, but continue to trace.
1016 void tracing_snapshot_alloc(void)
1020 ret = tracing_alloc_snapshot();
1026 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
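/*
 * Hedged usage sketch: kernel code that wants to freeze the trace when a
 * rare condition is hit could do, roughly:
 *
 *	if (unlikely(rare_condition))
 *		tracing_snapshot_alloc();
 *
 * tracing_snapshot_alloc() may sleep; from atomic context, allocate ahead
 * of time with tracing_alloc_snapshot() and call tracing_snapshot() at the
 * trigger point instead. "rare_condition" is an illustrative placeholder.
 */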
1028 void tracing_snapshot(void)
1030 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032 EXPORT_SYMBOL_GPL(tracing_snapshot);
1033 int tracing_alloc_snapshot(void)
1035 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1038 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1039 void tracing_snapshot_alloc(void)
1044 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1045 #endif /* CONFIG_TRACER_SNAPSHOT */
1047 void tracer_tracing_off(struct trace_array *tr)
1049 if (tr->trace_buffer.buffer)
1050 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 * This flag is looked at when buffers haven't been allocated
1053 * yet, or by some tracers (like irqsoff), that just want to
1054 * know if the ring buffer has been disabled, but it can handle
1055 * races where it gets disabled but we still do a record.
1056 * As the check is in the fast path of the tracers, it is more
1057 * important to be fast than accurate.
1059 tr->buffer_disabled = 1;
1060 /* Make the flag seen by readers */
1065 * tracing_off - turn off tracing buffers
1067 * This function stops the tracing buffers from recording data.
1068 * It does not disable any overhead the tracers themselves may
1069 * be causing. This function simply causes all recording to
1070 * the ring buffers to fail.
1072 void tracing_off(void)
1074 tracer_tracing_off(&global_trace);
1076 EXPORT_SYMBOL_GPL(tracing_off);
1078 void disable_trace_on_warning(void)
1080 if (__disable_trace_on_warning)
1085 * tracer_tracing_is_on - show real state of ring buffer enabled
1086 * @tr: the trace array to know if the ring buffer is enabled
1088 * Shows the real state of the ring buffer: whether it is enabled or not.
1090 int tracer_tracing_is_on(struct trace_array *tr)
1092 if (tr->trace_buffer.buffer)
1093 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1094 return !tr->buffer_disabled;
1098 * tracing_is_on - show state of ring buffers enabled
1100 int tracing_is_on(void)
1102 return tracer_tracing_is_on(&global_trace);
1104 EXPORT_SYMBOL_GPL(tracing_is_on);
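/*
 * Hedged usage sketch: tracing_on()/tracing_off() are often used as a
 * flight-recorder stop switch, so the ring buffer keeps the events that
 * led up to a problem. "suspicious_state_detected" is illustrative.
 *
 *	if (suspicious_state_detected()) {
 *		tracing_off();
 *		pr_warn("tracing stopped, inspect the trace buffer\n");
 *	}
 */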
1106 static int __init set_buf_size(char *str)
1108 unsigned long buf_size;
1112 buf_size = memparse(str, &str);
1113 /* nr_entries can not be zero */
1116 trace_buf_size = buf_size;
1119 __setup("trace_buf_size=", set_buf_size);
1121 static int __init set_tracing_thresh(char *str)
1123 unsigned long threshold;
1128 ret = kstrtoul(str, 0, &threshold);
1131 tracing_thresh = threshold * 1000;
1134 __setup("tracing_thresh=", set_tracing_thresh);
1136 unsigned long nsecs_to_usecs(unsigned long nsecs)
1138 return nsecs / 1000;
1142 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1143 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1144 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1145 * of strings in the order that the evals (enum) were defined.
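/*
 * Hedged illustration of the C(a, b) trick; the flag names here are only
 * examples and the real list lives in TRACE_FLAGS (see trace.h):
 *
 *	#define EXAMPLE_FLAGS			\
 *		C(PRINT_PARENT, "print-parent"),\
 *		C(SYM_OFFSET, "sym-offset")
 *
 *	#define C(a, b) b
 *	static const char *names[] = { EXAMPLE_FLAGS };
 *	#undef C
 *
 * expands to the list of strings, while redefining C(a, b) to expand to an
 * enum name built from 'a' yields the matching bit definitions.
 */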
1150 /* These must match the bit positions in trace_iterator_flags */
1151 static const char *trace_options[] = {
1159 int in_ns; /* is this clock in nanoseconds? */
1160 } trace_clocks[] = {
1161 { trace_clock_local, "local", 1 },
1162 { trace_clock_global, "global", 1 },
1163 { trace_clock_counter, "counter", 0 },
1164 { trace_clock_jiffies, "uptime", 0 },
1165 { trace_clock, "perf", 1 },
1166 { ktime_get_mono_fast_ns, "mono", 1 },
1167 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1168 { ktime_get_boot_fast_ns, "boot", 1 },
1172 bool trace_clock_in_ns(struct trace_array *tr)
1174 if (trace_clocks[tr->clock_id].in_ns)
1181 * trace_parser_get_init - gets the buffer for trace parser
1183 int trace_parser_get_init(struct trace_parser *parser, int size)
1185 memset(parser, 0, sizeof(*parser));
1187 parser->buffer = kmalloc(size, GFP_KERNEL);
1188 if (!parser->buffer)
1191 parser->size = size;
1196 * trace_parser_put - frees the buffer for trace parser
1198 void trace_parser_put(struct trace_parser *parser)
1200 kfree(parser->buffer);
1201 parser->buffer = NULL;
1205 * trace_get_user - reads the user input string separated by space
1206 * (matched by isspace(ch))
1208 * For each string found the 'struct trace_parser' is updated,
1209 * and the function returns.
1211 * Returns number of bytes read.
1213 * See kernel/trace/trace.h for 'struct trace_parser' details.
1215 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1216 size_t cnt, loff_t *ppos)
1223 trace_parser_clear(parser);
1225 ret = get_user(ch, ubuf++);
1233 * The parser is not finished with the last write,
1234 * continue reading the user input without skipping spaces.
1236 if (!parser->cont) {
1237 /* skip white space */
1238 while (cnt && isspace(ch)) {
1239 ret = get_user(ch, ubuf++);
1248 /* only spaces were written */
1249 if (isspace(ch) || !ch) {
1256 /* read the non-space input */
1257 while (cnt && !isspace(ch) && ch) {
1258 if (parser->idx < parser->size - 1)
1259 parser->buffer[parser->idx++] = ch;
1264 ret = get_user(ch, ubuf++);
1271 /* We either got finished input or we have to wait for another call. */
1272 if (isspace(ch) || !ch) {
1273 parser->buffer[parser->idx] = 0;
1274 parser->cont = false;
1275 } else if (parser->idx < parser->size - 1) {
1276 parser->cont = true;
1277 parser->buffer[parser->idx++] = ch;
1278 /* Make sure the parsed string always terminates with '\0'. */
1279 parser->buffer[parser->idx] = 0;
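/*
 * Hedged sketch of a write() handler built on the parser above; the names
 * "NUM_BUF_SIZE" and "use_value", and the error handling, are illustrative:
 *
 *	struct trace_parser parser;
 *	unsigned long val;
 *	loff_t pos;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, NUM_BUF_SIZE + 1))
 *		return -ENOMEM;
 *
 *	while (cnt > 0) {
 *		pos = 0;
 *		read = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (read <= 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += read;
 *		cnt -= read;
 *		if (!kstrtoul(parser.buffer, 0, &val))
 *			use_value(val);
 *		trace_parser_clear(&parser);
 *	}
 *	trace_parser_put(&parser);
 */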
1292 /* TODO add a seq_buf_to_buffer() */
1293 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1297 if (trace_seq_used(s) <= s->seq.readpos)
1300 len = trace_seq_used(s) - s->seq.readpos;
1303 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1305 s->seq.readpos += cnt;
1309 unsigned long __read_mostly tracing_thresh;
1311 #ifdef CONFIG_TRACER_MAX_TRACE
1313 * Copy the new maximum trace into the separate maximum-trace
1314 * structure. (this way the maximum trace is permanently saved,
1315 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1318 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1320 struct trace_buffer *trace_buf = &tr->trace_buffer;
1321 struct trace_buffer *max_buf = &tr->max_buffer;
1322 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1323 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1326 max_buf->time_start = data->preempt_timestamp;
1328 max_data->saved_latency = tr->max_latency;
1329 max_data->critical_start = data->critical_start;
1330 max_data->critical_end = data->critical_end;
1332 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1333 max_data->pid = tsk->pid;
1335 * If tsk == current, then use current_uid(), as that does not use
1336 * RCU. The irq tracer can be called out of RCU scope.
1339 max_data->uid = current_uid();
1341 max_data->uid = task_uid(tsk);
1343 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1344 max_data->policy = tsk->policy;
1345 max_data->rt_priority = tsk->rt_priority;
1347 /* record this tasks comm */
1348 tracing_record_cmdline(tsk);
1352 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1354 * @tsk: the task with the latency
1355 * @cpu: The cpu that initiated the trace.
1357 * Flip the buffers between the @tr and the max_tr and record information
1358 * about which task was the cause of this latency.
1361 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1363 struct ring_buffer *buf;
1368 WARN_ON_ONCE(!irqs_disabled());
1370 if (!tr->allocated_snapshot) {
1371 /* Only the nop tracer should hit this when disabling */
1372 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1376 arch_spin_lock(&tr->max_lock);
1378 buf = tr->trace_buffer.buffer;
1379 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1380 tr->max_buffer.buffer = buf;
1382 __update_max_tr(tr, tsk, cpu);
1383 arch_spin_unlock(&tr->max_lock);
1387 * update_max_tr_single - only copy one trace over, and reset the rest
1389 * @tsk: task with the latency
1390 * @cpu: the cpu of the buffer to copy.
1392 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1395 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1402 WARN_ON_ONCE(!irqs_disabled());
1403 if (!tr->allocated_snapshot) {
1404 /* Only the nop tracer should hit this when disabling */
1405 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1409 arch_spin_lock(&tr->max_lock);
1411 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1413 if (ret == -EBUSY) {
1415 * We failed to swap the buffer due to a commit taking
1416 * place on this CPU. We fail to record, but we reset
1417 * the max trace buffer (no one writes directly to it)
1418 * and flag that it failed.
1420 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1421 "Failed to swap buffers due to commit in progress\n");
1424 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1426 __update_max_tr(tr, tsk, cpu);
1427 arch_spin_unlock(&tr->max_lock);
1429 #endif /* CONFIG_TRACER_MAX_TRACE */
1431 static int wait_on_pipe(struct trace_iterator *iter, bool full)
1433 /* Iterators are static, they should be filled or empty */
1434 if (trace_buffer_iter(iter, iter->cpu_file))
1437 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1441 #ifdef CONFIG_FTRACE_STARTUP_TEST
1442 static bool selftests_can_run;
1444 struct trace_selftests {
1445 struct list_head list;
1446 struct tracer *type;
1449 static LIST_HEAD(postponed_selftests);
1451 static int save_selftest(struct tracer *type)
1453 struct trace_selftests *selftest;
1455 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1459 selftest->type = type;
1460 list_add(&selftest->list, &postponed_selftests);
1464 static int run_tracer_selftest(struct tracer *type)
1466 struct trace_array *tr = &global_trace;
1467 struct tracer *saved_tracer = tr->current_trace;
1470 if (!type->selftest || tracing_selftest_disabled)
1474 * If a tracer registers early in boot up (before scheduling is
1475 * initialized and such), then do not run its selftests yet.
1476 * Instead, run it a little later in the boot process.
1478 if (!selftests_can_run)
1479 return save_selftest(type);
1482 * Run a selftest on this tracer.
1483 * Here we reset the trace buffer, and set the current
1484 * tracer to be this tracer. The tracer can then run some
1485 * internal tracing to verify that everything is in order.
1486 * If we fail, we do not register this tracer.
1488 tracing_reset_online_cpus(&tr->trace_buffer);
1490 tr->current_trace = type;
1492 #ifdef CONFIG_TRACER_MAX_TRACE
1493 if (type->use_max_tr) {
1494 /* If we expanded the buffers, make sure the max is expanded too */
1495 if (ring_buffer_expanded)
1496 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1497 RING_BUFFER_ALL_CPUS);
1498 tr->allocated_snapshot = true;
1502 /* the test is responsible for initializing and enabling */
1503 pr_info("Testing tracer %s: ", type->name);
1504 ret = type->selftest(type, tr);
1505 /* the test is responsible for resetting too */
1506 tr->current_trace = saved_tracer;
1508 printk(KERN_CONT "FAILED!\n");
1509 /* Add the warning after printing 'FAILED' */
1513 /* Only reset on passing, to avoid touching corrupted buffers */
1514 tracing_reset_online_cpus(&tr->trace_buffer);
1516 #ifdef CONFIG_TRACER_MAX_TRACE
1517 if (type->use_max_tr) {
1518 tr->allocated_snapshot = false;
1520 /* Shrink the max buffer again */
1521 if (ring_buffer_expanded)
1522 ring_buffer_resize(tr->max_buffer.buffer, 1,
1523 RING_BUFFER_ALL_CPUS);
1527 printk(KERN_CONT "PASSED\n");
1531 static __init int init_trace_selftests(void)
1533 struct trace_selftests *p, *n;
1534 struct tracer *t, **last;
1537 selftests_can_run = true;
1539 mutex_lock(&trace_types_lock);
1541 if (list_empty(&postponed_selftests))
1544 pr_info("Running postponed tracer tests:\n");
1546 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1547 ret = run_tracer_selftest(p->type);
1548 /* If the test fails, then warn and remove from available_tracers */
1550 WARN(1, "tracer: %s failed selftest, disabling\n",
1552 last = &trace_types;
1553 for (t = trace_types; t; t = t->next) {
1566 mutex_unlock(&trace_types_lock);
1570 core_initcall(init_trace_selftests);
1572 static inline int run_tracer_selftest(struct tracer *type)
1576 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1578 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1580 static void __init apply_trace_boot_options(void);
1583 * register_tracer - register a tracer with the ftrace system.
1584 * @type: the plugin for the tracer
1586 * Register a new plugin tracer.
1588 int __init register_tracer(struct tracer *type)
1594 pr_info("Tracer must have a name\n");
1598 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1599 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1603 mutex_lock(&trace_types_lock);
1605 tracing_selftest_running = true;
1607 for (t = trace_types; t; t = t->next) {
1608 if (strcmp(type->name, t->name) == 0) {
1610 pr_info("Tracer %s already registered\n",
1617 if (!type->set_flag)
1618 type->set_flag = &dummy_set_flag;
1620 /* allocate a dummy tracer_flags */
1621 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1626 type->flags->val = 0;
1627 type->flags->opts = dummy_tracer_opt;
1629 if (!type->flags->opts)
1630 type->flags->opts = dummy_tracer_opt;
1632 /* store the tracer for __set_tracer_option */
1633 type->flags->trace = type;
1635 ret = run_tracer_selftest(type);
1639 type->next = trace_types;
1641 add_tracer_options(&global_trace, type);
1644 tracing_selftest_running = false;
1645 mutex_unlock(&trace_types_lock);
1647 if (ret || !default_bootup_tracer)
1650 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1653 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1654 /* Do we want this tracer to start on bootup? */
1655 tracing_set_tracer(&global_trace, type->name);
1656 default_bootup_tracer = NULL;
1658 apply_trace_boot_options();
1660 /* disable other selftests, since this will break it. */
1661 tracing_selftest_disabled = true;
1662 #ifdef CONFIG_FTRACE_STARTUP_TEST
1663 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
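/*
 * Hedged sketch of a minimal tracer registration; the "example" name and
 * the callbacks are illustrative, not part of this file. register_tracer()
 * is __init, so registration has to happen from init code:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init example_tracer_register(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(example_tracer_register);
 */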
1671 void tracing_reset(struct trace_buffer *buf, int cpu)
1673 struct ring_buffer *buffer = buf->buffer;
1678 ring_buffer_record_disable(buffer);
1680 /* Make sure all commits have finished */
1681 synchronize_sched();
1682 ring_buffer_reset_cpu(buffer, cpu);
1684 ring_buffer_record_enable(buffer);
1687 void tracing_reset_online_cpus(struct trace_buffer *buf)
1689 struct ring_buffer *buffer = buf->buffer;
1695 ring_buffer_record_disable(buffer);
1697 /* Make sure all commits have finished */
1698 synchronize_sched();
1700 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1702 for_each_online_cpu(cpu)
1703 ring_buffer_reset_cpu(buffer, cpu);
1705 ring_buffer_record_enable(buffer);
1708 /* Must have trace_types_lock held */
1709 void tracing_reset_all_online_cpus(void)
1711 struct trace_array *tr;
1713 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1714 if (!tr->clear_trace)
1716 tr->clear_trace = false;
1717 tracing_reset_online_cpus(&tr->trace_buffer);
1718 #ifdef CONFIG_TRACER_MAX_TRACE
1719 tracing_reset_online_cpus(&tr->max_buffer);
1724 static int *tgid_map;
1726 #define SAVED_CMDLINES_DEFAULT 128
1727 #define NO_CMDLINE_MAP UINT_MAX
1728 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1729 struct saved_cmdlines_buffer {
1730 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1731 unsigned *map_cmdline_to_pid;
1732 unsigned cmdline_num;
1734 char *saved_cmdlines;
1736 static struct saved_cmdlines_buffer *savedcmd;
1738 /* temporarily disable recording */
1739 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1741 static inline char *get_saved_cmdlines(int idx)
1743 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1746 static inline void set_cmdline(int idx, const char *cmdline)
1748 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1751 static int allocate_cmdlines_buffer(unsigned int val,
1752 struct saved_cmdlines_buffer *s)
1754 s->map_cmdline_to_pid = kmalloc_array(val,
1755 sizeof(*s->map_cmdline_to_pid),
1757 if (!s->map_cmdline_to_pid)
1760 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1761 if (!s->saved_cmdlines) {
1762 kfree(s->map_cmdline_to_pid);
1767 s->cmdline_num = val;
1768 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1769 sizeof(s->map_pid_to_cmdline));
1770 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1771 val * sizeof(*s->map_cmdline_to_pid));
1776 static int trace_create_savedcmd(void)
1780 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1784 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1794 int is_tracing_stopped(void)
1796 return global_trace.stop_count;
1800 * tracing_start - quick start of the tracer
1802 * If tracing is enabled but was stopped by tracing_stop,
1803 * this will start the tracer back up.
1805 void tracing_start(void)
1807 struct ring_buffer *buffer;
1808 unsigned long flags;
1810 if (tracing_disabled)
1813 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1814 if (--global_trace.stop_count) {
1815 if (global_trace.stop_count < 0) {
1816 /* Someone screwed up their debugging */
1818 global_trace.stop_count = 0;
1823 /* Prevent the buffers from switching */
1824 arch_spin_lock(&global_trace.max_lock);
1826 buffer = global_trace.trace_buffer.buffer;
1828 ring_buffer_record_enable(buffer);
1830 #ifdef CONFIG_TRACER_MAX_TRACE
1831 buffer = global_trace.max_buffer.buffer;
1833 ring_buffer_record_enable(buffer);
1836 arch_spin_unlock(&global_trace.max_lock);
1839 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1842 static void tracing_start_tr(struct trace_array *tr)
1844 struct ring_buffer *buffer;
1845 unsigned long flags;
1847 if (tracing_disabled)
1850 /* If global, we need to also start the max tracer */
1851 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1852 return tracing_start();
1854 raw_spin_lock_irqsave(&tr->start_lock, flags);
1856 if (--tr->stop_count) {
1857 if (tr->stop_count < 0) {
1858 /* Someone screwed up their debugging */
1865 buffer = tr->trace_buffer.buffer;
1867 ring_buffer_record_enable(buffer);
1870 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1874 * tracing_stop - quick stop of the tracer
1876 * Light weight way to stop tracing. Use in conjunction with
1879 void tracing_stop(void)
1881 struct ring_buffer *buffer;
1882 unsigned long flags;
1884 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1885 if (global_trace.stop_count++)
1888 /* Prevent the buffers from switching */
1889 arch_spin_lock(&global_trace.max_lock);
1891 buffer = global_trace.trace_buffer.buffer;
1893 ring_buffer_record_disable(buffer);
1895 #ifdef CONFIG_TRACER_MAX_TRACE
1896 buffer = global_trace.max_buffer.buffer;
1898 ring_buffer_record_disable(buffer);
1901 arch_spin_unlock(&global_trace.max_lock);
1904 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1907 static void tracing_stop_tr(struct trace_array *tr)
1909 struct ring_buffer *buffer;
1910 unsigned long flags;
1912 /* If global, we need to also stop the max tracer */
1913 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1914 return tracing_stop();
1916 raw_spin_lock_irqsave(&tr->start_lock, flags);
1917 if (tr->stop_count++)
1920 buffer = tr->trace_buffer.buffer;
1922 ring_buffer_record_disable(buffer);
1925 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1928 static int trace_save_cmdline(struct task_struct *tsk)
1932 /* treat recording of idle task as a success */
1936 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1940 * It's not the end of the world if we don't get
1941 * the lock, but we also don't want to spin
1942 * nor do we want to disable interrupts,
1943 * so if we miss here, then better luck next time.
1945 if (!arch_spin_trylock(&trace_cmdline_lock))
1948 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1949 if (idx == NO_CMDLINE_MAP) {
1950 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1953 * Check whether the cmdline buffer at idx has a pid
1954 * mapped. We are going to overwrite that entry so we
1955 * need to clear the map_pid_to_cmdline. Otherwise we
1956 * would read the new comm for the old pid.
1958 pid = savedcmd->map_cmdline_to_pid[idx];
1959 if (pid != NO_CMDLINE_MAP)
1960 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1962 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1963 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1965 savedcmd->cmdline_idx = idx;
1968 set_cmdline(idx, tsk->comm);
1970 arch_spin_unlock(&trace_cmdline_lock);
1975 static void __trace_find_cmdline(int pid, char comm[])
1980 strcpy(comm, "<idle>");
1984 if (WARN_ON_ONCE(pid < 0)) {
1985 strcpy(comm, "<XXX>");
1989 if (pid > PID_MAX_DEFAULT) {
1990 strcpy(comm, "<...>");
1994 map = savedcmd->map_pid_to_cmdline[pid];
1995 if (map != NO_CMDLINE_MAP)
1996 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
1998 strcpy(comm, "<...>");
2001 void trace_find_cmdline(int pid, char comm[])
2004 arch_spin_lock(&trace_cmdline_lock);
2006 __trace_find_cmdline(pid, comm);
2008 arch_spin_unlock(&trace_cmdline_lock);
2012 int trace_find_tgid(int pid)
2014 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2017 return tgid_map[pid];
2020 static int trace_save_tgid(struct task_struct *tsk)
2022 /* treat recording of idle task as a success */
2026 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2029 tgid_map[tsk->pid] = tsk->tgid;
2033 static bool tracing_record_taskinfo_skip(int flags)
2035 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2037 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2039 if (!__this_cpu_read(trace_taskinfo_save))
2045 * tracing_record_taskinfo - record the task info of a task
2047 * @task: task to record
2048 * @flags: TRACE_RECORD_CMDLINE for recording comm
2049 *         TRACE_RECORD_TGID for recording tgid
2051 void tracing_record_taskinfo(struct task_struct *task, int flags)
2055 if (tracing_record_taskinfo_skip(flags))
2059 * Record as much task information as possible. If some fail, continue
2060 * to try to record the others.
2062 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2063 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2065 /* If recording any information failed, retry again soon. */
2069 __this_cpu_write(trace_taskinfo_save, false);
2073 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2075 * @prev: previous task during sched_switch
2076 * @next: next task during sched_switch
2077 * @flags: TRACE_RECORD_CMDLINE for recording comm
2078 *         TRACE_RECORD_TGID for recording tgid
2080 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2081 struct task_struct *next, int flags)
2085 if (tracing_record_taskinfo_skip(flags))
2089 * Record as much task information as possible. If some fail, continue
2090 * to try to record the others.
2092 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2093 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2094 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2095 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2097 /* If recording any information failed, retry again soon. */
2101 __this_cpu_write(trace_taskinfo_save, false);
2104 /* Helpers to record a specific task information */
2105 void tracing_record_cmdline(struct task_struct *task)
2107 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2110 void tracing_record_tgid(struct task_struct *task)
2112 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2116 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2117 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2118 * simplifies those functions and keeps them in sync.
2120 enum print_line_t trace_handle_return(struct trace_seq *s)
2122 return trace_seq_has_overflowed(s) ?
2123 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2125 EXPORT_SYMBOL_GPL(trace_handle_return);
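/*
 * Hedged sketch of how an event output callback typically uses the helper
 * above; the function name and output format are illustrative:
 *
 *	static enum print_line_t example_output(struct trace_iterator *iter,
 *						int flags,
 *						struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "example event\n");
 *		return trace_handle_return(s);
 *	}
 */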
2128 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2131 struct task_struct *tsk = current;
2133 entry->preempt_count = pc & 0xff;
2134 entry->pid = (tsk) ? tsk->pid : 0;
2136 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2137 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2139 TRACE_FLAG_IRQS_NOSUPPORT |
2141 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2142 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2143 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2144 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2145 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2147 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2149 struct ring_buffer_event *
2150 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2153 unsigned long flags, int pc)
2155 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2158 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2159 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2160 static int trace_buffered_event_ref;
2163 * trace_buffered_event_enable - enable buffering events
2165 * When events are being filtered, it is quicker to use a temporary
2166 * buffer to write the event data into if there's a likely chance
2167 * that it will not be committed. The discard of the ring buffer
2168 * is not as fast as committing, and is much slower than copying
2171 * When an event is to be filtered, allocate per cpu buffers to
2172 * write the event data into; if the event is then filtered and discarded
2173 * it is simply dropped, otherwise the entire data is committed
2176 void trace_buffered_event_enable(void)
2178 struct ring_buffer_event *event;
2182 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2184 if (trace_buffered_event_ref++)
2187 for_each_tracing_cpu(cpu) {
2188 page = alloc_pages_node(cpu_to_node(cpu),
2189 GFP_KERNEL | __GFP_NORETRY, 0);
2193 event = page_address(page);
2194 memset(event, 0, sizeof(*event));
2196 per_cpu(trace_buffered_event, cpu) = event;
2199 if (cpu == smp_processor_id() &&
2200 this_cpu_read(trace_buffered_event) !=
2201 per_cpu(trace_buffered_event, cpu))
2208 trace_buffered_event_disable();
2211 static void enable_trace_buffered_event(void *data)
2213 /* Probably not needed, but do it anyway */
2215 this_cpu_dec(trace_buffered_event_cnt);
2218 static void disable_trace_buffered_event(void *data)
2220 this_cpu_inc(trace_buffered_event_cnt);
2224 * trace_buffered_event_disable - disable buffering events
2226 * When a filter is removed, it is faster to not use the buffered
2227 * events, and to commit directly into the ring buffer. Free up
2228 * the temp buffers when there are no more users. This requires
2229 * special synchronization with current events.
2231 void trace_buffered_event_disable(void)
2235 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2237 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2240 if (--trace_buffered_event_ref)
2244 /* For each CPU, set the buffer as used. */
2245 smp_call_function_many(tracing_buffer_mask,
2246 disable_trace_buffered_event, NULL, 1);
2249 /* Wait for all current users to finish */
2250 synchronize_sched();
2252 for_each_tracing_cpu(cpu) {
2253 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2254 per_cpu(trace_buffered_event, cpu) = NULL;
2257 * Make sure trace_buffered_event is NULL before clearing
2258 * trace_buffered_event_cnt.
2263 /* Do the work on each cpu */
2264 smp_call_function_many(tracing_buffer_mask,
2265 enable_trace_buffered_event, NULL, 1);
2269 static struct ring_buffer *temp_buffer;
2271 struct ring_buffer_event *
2272 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2273 struct trace_event_file *trace_file,
2274 int type, unsigned long len,
2275 unsigned long flags, int pc)
2277 struct ring_buffer_event *entry;
2280 *current_rb = trace_file->tr->trace_buffer.buffer;
2282 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2283 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2284 (entry = this_cpu_read(trace_buffered_event))) {
2285 /* Try to use the per cpu buffer first */
2286 val = this_cpu_inc_return(trace_buffered_event_cnt);
2288 trace_event_setup(entry, type, flags, pc);
2289 entry->array[0] = len;
2292 this_cpu_dec(trace_buffered_event_cnt);
2295 entry = __trace_buffer_lock_reserve(*current_rb,
2296 type, len, flags, pc);
2298 * If tracing is off, but we have triggers enabled
2299 * we still need to look at the event data. Use the temp_buffer
2300 * to store the trace event for the trigger to use. It's recursion
2301 * safe and will not be recorded anywhere.
2303 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2304 *current_rb = temp_buffer;
2305 entry = __trace_buffer_lock_reserve(*current_rb,
2306 type, len, flags, pc);
2310 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2312 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2313 static DEFINE_MUTEX(tracepoint_printk_mutex);
2315 static void output_printk(struct trace_event_buffer *fbuffer)
2317 struct trace_event_call *event_call;
2318 struct trace_event *event;
2319 unsigned long flags;
2320 struct trace_iterator *iter = tracepoint_print_iter;
2322 /* We should never get here if iter is NULL */
2323 if (WARN_ON_ONCE(!iter))
2326 event_call = fbuffer->trace_file->event_call;
2327 if (!event_call || !event_call->event.funcs ||
2328 !event_call->event.funcs->trace)
2331 event = &fbuffer->trace_file->event_call->event;
2333 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2334 trace_seq_init(&iter->seq);
2335 iter->ent = fbuffer->entry;
2336 event_call->event.funcs->trace(iter, 0, event);
2337 trace_seq_putc(&iter->seq, 0);
2338 printk("%s", iter->seq.buffer);
2340 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2343 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2344 void __user *buffer, size_t *lenp,
2347 int save_tracepoint_printk;
2350 mutex_lock(&tracepoint_printk_mutex);
2351 save_tracepoint_printk = tracepoint_printk;
2353 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2356 * This will force exiting early, as tracepoint_printk
2357 * is always zero when tracepoint_print_iter is not allocated
2359 if (!tracepoint_print_iter)
2360 tracepoint_printk = 0;
2362 if (save_tracepoint_printk == tracepoint_printk)
2365 if (tracepoint_printk)
2366 static_key_enable(&tracepoint_printk_key.key);
2368 static_key_disable(&tracepoint_printk_key.key);
2371 mutex_unlock(&tracepoint_printk_mutex);
2376 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2378 if (static_key_false(&tracepoint_printk_key.key))
2379 output_printk(fbuffer);
2381 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2382 fbuffer->event, fbuffer->entry,
2383 fbuffer->flags, fbuffer->pc);
2385 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2390 * trace_buffer_unlock_commit_regs()
2391 * trace_event_buffer_commit()
2392 * trace_event_raw_event_xxx()
2394 # define STACK_SKIP 3
2396 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2397 struct ring_buffer *buffer,
2398 struct ring_buffer_event *event,
2399 unsigned long flags, int pc,
2400 struct pt_regs *regs)
2402 __buffer_unlock_commit(buffer, event);
2405 * If regs is not set, then skip the necessary functions.
2406 * Note, we can still get here via blktrace, wakeup tracer
2407 * and mmiotrace, but that's ok if they lose a function or
2408 * two. They are not that meaningful.
2410 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2411 ftrace_trace_userstack(buffer, flags, pc);
2415 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2418 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2419 struct ring_buffer_event *event)
2421 __buffer_unlock_commit(buffer, event);
2425 trace_process_export(struct trace_export *export,
2426 struct ring_buffer_event *event)
2428 struct trace_entry *entry;
2429 unsigned int size = 0;
2431 entry = ring_buffer_event_data(event);
2432 size = ring_buffer_event_length(event);
2433 export->write(export, entry, size);
2436 static DEFINE_MUTEX(ftrace_export_lock);
2438 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2440 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2442 static inline void ftrace_exports_enable(void)
2444 static_branch_enable(&ftrace_exports_enabled);
2447 static inline void ftrace_exports_disable(void)
2449 static_branch_disable(&ftrace_exports_enabled);
2452 void ftrace_exports(struct ring_buffer_event *event)
2454 struct trace_export *export;
2456 preempt_disable_notrace();
2458 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2460 trace_process_export(export, event);
2461 export = rcu_dereference_raw_notrace(export->next);
2464 preempt_enable_notrace();
2468 add_trace_export(struct trace_export **list, struct trace_export *export)
2470 rcu_assign_pointer(export->next, *list);
2472 * We are adding export to the list but another
2473 * CPU might be walking that list. We need to make sure
2474 * the export->next pointer is valid before another CPU sees
2475 * the export pointer included in the list.
2477 rcu_assign_pointer(*list, export);
2481 rm_trace_export(struct trace_export **list, struct trace_export *export)
2483 struct trace_export **p;
2485 for (p = list; *p != NULL; p = &(*p)->next)
2492 rcu_assign_pointer(*p, (*p)->next);
2498 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2501 ftrace_exports_enable();
2503 add_trace_export(list, export);
2507 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511 ret = rm_trace_export(list, export);
2513 ftrace_exports_disable();
2518 int register_ftrace_export(struct trace_export *export)
2520 if (WARN_ON_ONCE(!export->write))
2523 mutex_lock(&ftrace_export_lock);
2525 add_ftrace_export(&ftrace_exports_list, export);
2527 mutex_unlock(&ftrace_export_lock);
2531 EXPORT_SYMBOL_GPL(register_ftrace_export);
2533 int unregister_ftrace_export(struct trace_export *export)
2537 mutex_lock(&ftrace_export_lock);
2539 ret = rm_ftrace_export(&ftrace_exports_list, export);
2541 mutex_unlock(&ftrace_export_lock);
2545 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
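/*
 * Illustrative sketch (not part of the original file): an in-kernel
 * consumer of function trace events would hook in roughly like this;
 * the my_* names are hypothetical.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// forward 'size' bytes of the raw trace entry elsewhere
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	// at module init/exit time:
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */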
2548 trace_function(struct trace_array *tr,
2549 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2552 struct trace_event_call *call = &event_function;
2553 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2554 struct ring_buffer_event *event;
2555 struct ftrace_entry *entry;
2557 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561 entry = ring_buffer_event_data(event);
2563 entry->parent_ip = parent_ip;
2565 if (!call_filter_check_discard(call, entry, buffer, event)) {
2566 if (static_branch_unlikely(&ftrace_exports_enabled))
2567 ftrace_exports(event);
2568 __buffer_unlock_commit(buffer, event);
2572 #ifdef CONFIG_STACKTRACE
2574 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2575 struct ftrace_stack {
2576 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2579 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2580 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2582 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2583 unsigned long flags,
2584 int skip, int pc, struct pt_regs *regs)
2586 struct trace_event_call *call = &event_kernel_stack;
2587 struct ring_buffer_event *event;
2588 struct stack_entry *entry;
2589 struct stack_trace trace;
2591 int size = FTRACE_STACK_ENTRIES;
2593 trace.nr_entries = 0;
2597 * Add one, for this function and the call to save_stack_trace()
2598 * If regs is set, then these functions will not be in the way.
2600 #ifndef CONFIG_UNWINDER_ORC
2606 * Since events can happen in NMIs there's no safe way to
2607 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2608 * or NMI comes in, it will just have to use the default
2609 * FTRACE_STACK_SIZE.
2611 preempt_disable_notrace();
2613 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2615 * We don't need any atomic variables, just a barrier.
2616 * If an interrupt comes in, we don't care, because it would
2617 * have exited and put the counter back to what we want.
2618 * We just need a barrier to keep gcc from moving things around.
2622 if (use_stack == 1) {
2623 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2624 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2627 save_stack_trace_regs(regs, &trace);
2629 save_stack_trace(&trace);
2631 if (trace.nr_entries > size)
2632 size = trace.nr_entries;
2634 /* From now on, use_stack is a boolean */
2637 size *= sizeof(unsigned long);
2639 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2640 sizeof(*entry) + size, flags, pc);
2643 entry = ring_buffer_event_data(event);
2645 memset(&entry->caller, 0, size);
2648 memcpy(&entry->caller, trace.entries,
2649 trace.nr_entries * sizeof(unsigned long));
2651 trace.max_entries = FTRACE_STACK_ENTRIES;
2652 trace.entries = entry->caller;
2654 save_stack_trace_regs(regs, &trace);
2656 save_stack_trace(&trace);
2659 entry->size = trace.nr_entries;
2661 if (!call_filter_check_discard(call, entry, buffer, event))
2662 __buffer_unlock_commit(buffer, event);
2665 /* Again, don't let gcc optimize things here */
2667 __this_cpu_dec(ftrace_stack_reserve);
2668 preempt_enable_notrace();
2672 static inline void ftrace_trace_stack(struct trace_array *tr,
2673 struct ring_buffer *buffer,
2674 unsigned long flags,
2675 int skip, int pc, struct pt_regs *regs)
2677 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2680 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2683 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2686 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2688 if (rcu_is_watching()) {
2689 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2694 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2695 * but if the above rcu_is_watching() failed, then the NMI
2696 * triggered someplace critical, and rcu_irq_enter() should
2697 * not be called from NMI.
2699 if (unlikely(in_nmi()))
2702 rcu_irq_enter_irqson();
2703 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2704 rcu_irq_exit_irqson();
2708 * trace_dump_stack - record a stack back trace in the trace buffer
2709 * @skip: Number of functions to skip (helper handlers)
2711 void trace_dump_stack(int skip)
2713 unsigned long flags;
2715 if (tracing_disabled || tracing_selftest_running)
2718 local_save_flags(flags);
2720 #ifndef CONFIG_UNWINDER_ORC
2721 /* Skip 1 to skip this function. */
2724 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2725 flags, skip, preempt_count(), NULL);
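/*
 * Illustrative example (not part of the original file): a debug call
 * site can record its current backtrace into the ring buffer with
 *
 *	trace_dump_stack(0);
 *
 * and read it back from the "trace" file; a non-zero skip hides that
 * many helper frames at the top of the recorded stack.
 */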
2728 static DEFINE_PER_CPU(int, user_stack_count);
2731 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2733 struct trace_event_call *call = &event_user_stack;
2734 struct ring_buffer_event *event;
2735 struct userstack_entry *entry;
2736 struct stack_trace trace;
2738 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2742 * NMIs cannot handle page faults, even with fixups.
2743 * Saving the user stack can (and often does) fault.
2745 if (unlikely(in_nmi()))
2749 * prevent recursion, since the user stack tracing may
2750 * trigger other kernel events.
2753 if (__this_cpu_read(user_stack_count))
2756 __this_cpu_inc(user_stack_count);
2758 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2759 sizeof(*entry), flags, pc);
2761 goto out_drop_count;
2762 entry = ring_buffer_event_data(event);
2764 entry->tgid = current->tgid;
2765 memset(&entry->caller, 0, sizeof(entry->caller));
2767 trace.nr_entries = 0;
2768 trace.max_entries = FTRACE_STACK_ENTRIES;
2770 trace.entries = entry->caller;
2772 save_stack_trace_user(&trace);
2773 if (!call_filter_check_discard(call, entry, buffer, event))
2774 __buffer_unlock_commit(buffer, event);
2777 __this_cpu_dec(user_stack_count);
2783 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2785 ftrace_trace_userstack(tr, flags, preempt_count());
2789 #endif /* CONFIG_STACKTRACE */
2791 /* created for use with alloc_percpu */
2792 struct trace_buffer_struct {
2794 char buffer[4][TRACE_BUF_SIZE];
2797 static struct trace_buffer_struct *trace_percpu_buffer;
2800 * This allows for lockless recording. If we're nested too deeply, then
2801 * this returns NULL.
2803 static char *get_trace_buf(void)
2805 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2807 if (!buffer || buffer->nesting >= 4)
2812 /* Interrupts must see nesting incremented before we use the buffer */
2814 return &buffer->buffer[buffer->nesting][0];
2817 static void put_trace_buf(void)
2819 /* Don't let the decrement of nesting leak before this */
2821 this_cpu_dec(trace_percpu_buffer->nesting);
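/*
 * Illustrative sketch (not part of the original file): callers such as
 * trace_vbprintk()/trace_vprintk() below pair these helpers like
 *
 *	char *tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out;	// nested more than 4 levels deep
 *	... format the message into tbuffer ...
 *	put_trace_buf();
 *
 * so that each nesting context (task, softirq, irq, NMI) gets its own slot.
 */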
2824 static int alloc_percpu_trace_buffer(void)
2826 struct trace_buffer_struct *buffers;
2828 buffers = alloc_percpu(struct trace_buffer_struct);
2829 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2832 trace_percpu_buffer = buffers;
2836 static int buffers_allocated;
2838 void trace_printk_init_buffers(void)
2840 if (buffers_allocated)
2843 if (alloc_percpu_trace_buffer())
2846 /* trace_printk() is for debug use only. Don't use it in production. */
2849 pr_warn("**********************************************************\n");
2850 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2852 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2854 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2855 pr_warn("** unsafe for production use. **\n");
2857 pr_warn("** If you see this message and you are not debugging **\n");
2858 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2860 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2861 pr_warn("**********************************************************\n");
2863 /* Expand the buffers to set size */
2864 tracing_update_buffers();
2866 buffers_allocated = 1;
2869 * trace_printk_init_buffers() can be called by modules.
2870 * If that happens, then we need to start cmdline recording
2871 * directly here. If the global_trace.buffer is already
2872 * allocated here, then this was called by module code.
2874 if (global_trace.trace_buffer.buffer)
2875 tracing_start_cmdline_record();
2878 void trace_printk_start_comm(void)
2880 /* Start tracing comms if trace printk is set */
2881 if (!buffers_allocated)
2883 tracing_start_cmdline_record();
2886 static void trace_printk_start_stop_comm(int enabled)
2888 if (!buffers_allocated)
2892 tracing_start_cmdline_record();
2894 tracing_stop_cmdline_record();
2898 * trace_vbprintk - write a binary message to the tracing buffer
2901 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2903 struct trace_event_call *call = &event_bprint;
2904 struct ring_buffer_event *event;
2905 struct ring_buffer *buffer;
2906 struct trace_array *tr = &global_trace;
2907 struct bprint_entry *entry;
2908 unsigned long flags;
2910 int len = 0, size, pc;
2912 if (unlikely(tracing_selftest_running || tracing_disabled))
2915 /* Don't pollute graph traces with trace_vprintk internals */
2916 pause_graph_tracing();
2918 pc = preempt_count();
2919 preempt_disable_notrace();
2921 tbuffer = get_trace_buf();
2927 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2929 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2932 local_save_flags(flags);
2933 size = sizeof(*entry) + sizeof(u32) * len;
2934 buffer = tr->trace_buffer.buffer;
2935 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2939 entry = ring_buffer_event_data(event);
2943 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2944 if (!call_filter_check_discard(call, entry, buffer, event)) {
2945 __buffer_unlock_commit(buffer, event);
2946 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2953 preempt_enable_notrace();
2954 unpause_graph_tracing();
2958 EXPORT_SYMBOL_GPL(trace_vbprintk);
2961 __trace_array_vprintk(struct ring_buffer *buffer,
2962 unsigned long ip, const char *fmt, va_list args)
2964 struct trace_event_call *call = &event_print;
2965 struct ring_buffer_event *event;
2966 int len = 0, size, pc;
2967 struct print_entry *entry;
2968 unsigned long flags;
2971 if (tracing_disabled || tracing_selftest_running)
2974 /* Don't pollute graph traces with trace_vprintk internals */
2975 pause_graph_tracing();
2977 pc = preempt_count();
2978 preempt_disable_notrace();
2981 tbuffer = get_trace_buf();
2987 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2989 local_save_flags(flags);
2990 size = sizeof(*entry) + len + 1;
2991 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2995 entry = ring_buffer_event_data(event);
2998 memcpy(&entry->buf, tbuffer, len + 1);
2999 if (!call_filter_check_discard(call, entry, buffer, event)) {
3000 __buffer_unlock_commit(buffer, event);
3001 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3008 preempt_enable_notrace();
3009 unpause_graph_tracing();
3014 int trace_array_vprintk(struct trace_array *tr,
3015 unsigned long ip, const char *fmt, va_list args)
3017 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3020 int trace_array_printk(struct trace_array *tr,
3021 unsigned long ip, const char *fmt, ...)
3026 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3030 ret = trace_array_vprintk(tr, ip, fmt, ap);
3035 int trace_array_printk_buf(struct ring_buffer *buffer,
3036 unsigned long ip, const char *fmt, ...)
3041 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3045 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3050 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3052 return trace_array_vprintk(&global_trace, ip, fmt, args);
3054 EXPORT_SYMBOL_GPL(trace_vprintk);
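/*
 * Illustrative example (not part of the original file): code that owns
 * a trace_array instance (tr and depth are hypothetical here) can write
 * into it directly:
 *
 *	trace_array_printk(tr, _THIS_IP_, "queue depth %d\n", depth);
 *
 * while plain trace_printk() ends up in the global buffer via
 * trace_vprintk() above.
 */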
3056 static void trace_iterator_increment(struct trace_iterator *iter)
3058 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3062 ring_buffer_read(buf_iter, NULL);
3065 static struct trace_entry *
3066 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3067 unsigned long *lost_events)
3069 struct ring_buffer_event *event;
3070 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3073 event = ring_buffer_iter_peek(buf_iter, ts);
3075 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3079 iter->ent_size = ring_buffer_event_length(event);
3080 return ring_buffer_event_data(event);
3086 static struct trace_entry *
3087 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3088 unsigned long *missing_events, u64 *ent_ts)
3090 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3091 struct trace_entry *ent, *next = NULL;
3092 unsigned long lost_events = 0, next_lost = 0;
3093 int cpu_file = iter->cpu_file;
3094 u64 next_ts = 0, ts;
3100 * If we are in a per_cpu trace file, don't bother iterating over
3101 * all CPUs; peek at that CPU directly.
3103 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3104 if (ring_buffer_empty_cpu(buffer, cpu_file))
3106 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3108 *ent_cpu = cpu_file;
3113 for_each_tracing_cpu(cpu) {
3115 if (ring_buffer_empty_cpu(buffer, cpu))
3118 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3121 * Pick the entry with the smallest timestamp:
3123 if (ent && (!next || ts < next_ts)) {
3127 next_lost = lost_events;
3128 next_size = iter->ent_size;
3132 iter->ent_size = next_size;
3135 *ent_cpu = next_cpu;
3141 *missing_events = next_lost;
3146 /* Find the next real entry, without updating the iterator itself */
3147 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3148 int *ent_cpu, u64 *ent_ts)
3150 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3153 /* Find the next real entry, and increment the iterator to the next entry */
3154 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3156 iter->ent = __find_next_entry(iter, &iter->cpu,
3157 &iter->lost_events, &iter->ts);
3160 trace_iterator_increment(iter);
3162 return iter->ent ? iter : NULL;
3165 static void trace_consume(struct trace_iterator *iter)
3167 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3168 &iter->lost_events);
3171 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3173 struct trace_iterator *iter = m->private;
3177 WARN_ON_ONCE(iter->leftover);
3181 /* can't go backwards */
3186 ent = trace_find_next_entry_inc(iter);
3190 while (ent && iter->idx < i)
3191 ent = trace_find_next_entry_inc(iter);
3198 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3200 struct ring_buffer_event *event;
3201 struct ring_buffer_iter *buf_iter;
3202 unsigned long entries = 0;
3205 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3207 buf_iter = trace_buffer_iter(iter, cpu);
3211 ring_buffer_iter_reset(buf_iter);
3214 * With the max latency tracers, a reset may never have taken
3215 * place on a cpu. This is evident by the timestamp being
3216 * before the start of the buffer.
3218 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3219 if (ts >= iter->trace_buffer->time_start)
3222 ring_buffer_read(buf_iter, NULL);
3225 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3229 * The current tracer is copied to avoid taking a global lock all around.
3232 static void *s_start(struct seq_file *m, loff_t *pos)
3234 struct trace_iterator *iter = m->private;
3235 struct trace_array *tr = iter->tr;
3236 int cpu_file = iter->cpu_file;
3242 * copy the tracer to avoid using a global lock all around.
3243 * iter->trace is a copy of current_trace; the pointer to the
3244 * name may be used instead of a strcmp(), as iter->trace->name
3245 * will point to the same string as current_trace->name.
3247 mutex_lock(&trace_types_lock);
3248 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3249 *iter->trace = *tr->current_trace;
3250 mutex_unlock(&trace_types_lock);
3252 #ifdef CONFIG_TRACER_MAX_TRACE
3253 if (iter->snapshot && iter->trace->use_max_tr)
3254 return ERR_PTR(-EBUSY);
3257 if (!iter->snapshot)
3258 atomic_inc(&trace_record_taskinfo_disabled);
3260 if (*pos != iter->pos) {
3265 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3266 for_each_tracing_cpu(cpu)
3267 tracing_iter_reset(iter, cpu);
3269 tracing_iter_reset(iter, cpu_file);
3272 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3277 * If we overflowed the seq_file before, then we want
3278 * to just reuse the trace_seq buffer again.
3284 p = s_next(m, p, &l);
3288 trace_event_read_lock();
3289 trace_access_lock(cpu_file);
3293 static void s_stop(struct seq_file *m, void *p)
3295 struct trace_iterator *iter = m->private;
3297 #ifdef CONFIG_TRACER_MAX_TRACE
3298 if (iter->snapshot && iter->trace->use_max_tr)
3302 if (!iter->snapshot)
3303 atomic_dec(&trace_record_taskinfo_disabled);
3305 trace_access_unlock(iter->cpu_file);
3306 trace_event_read_unlock();
3310 get_total_entries(struct trace_buffer *buf,
3311 unsigned long *total, unsigned long *entries)
3313 unsigned long count;
3319 for_each_tracing_cpu(cpu) {
3320 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3322 * If this buffer has skipped entries, then we hold all
3323 * entries for the trace and we need to ignore the
3324 * ones before the time stamp.
3326 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3327 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3328 /* total is the same as the entries */
3332 ring_buffer_overrun_cpu(buf->buffer, cpu);
3337 static void print_lat_help_header(struct seq_file *m)
3339 seq_puts(m, "# _------=> CPU# \n"
3340 "# / _-----=> irqs-off \n"
3341 "# | / _----=> need-resched \n"
3342 "# || / _---=> hardirq/softirq \n"
3343 "# ||| / _--=> preempt-depth \n"
3345 "# cmd pid ||||| time | caller \n"
3346 "# \\ / ||||| \\ | / \n");
3349 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3351 unsigned long total;
3352 unsigned long entries;
3354 get_total_entries(buf, &total, &entries);
3355 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3356 entries, total, num_online_cpus());
3360 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3363 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3365 print_event_info(buf, m);
3367 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3368 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
3371 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3374 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3375 const char tgid_space[] = " ";
3376 const char space[] = " ";
3378 seq_printf(m, "# %s _-----=> irqs-off\n",
3379 tgid ? tgid_space : space);
3380 seq_printf(m, "# %s / _----=> need-resched\n",
3381 tgid ? tgid_space : space);
3382 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3383 tgid ? tgid_space : space);
3384 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3385 tgid ? tgid_space : space);
3386 seq_printf(m, "# %s||| / delay\n",
3387 tgid ? tgid_space : space);
3388 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3389 tgid ? " TGID " : space);
3390 seq_printf(m, "# | | | %s|||| | |\n",
3391 tgid ? " | " : space);
3395 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3397 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3398 struct trace_buffer *buf = iter->trace_buffer;
3399 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3400 struct tracer *type = iter->trace;
3401 unsigned long entries;
3402 unsigned long total;
3403 const char *name = "preemption";
3407 get_total_entries(buf, &total, &entries);
3409 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3411 seq_puts(m, "# -----------------------------------"
3412 "---------------------------------\n");
3413 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3414 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3415 nsecs_to_usecs(data->saved_latency),
3419 #if defined(CONFIG_PREEMPT_NONE)
3421 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3423 #elif defined(CONFIG_PREEMPT)
3428 /* These are reserved for later use */
3431 seq_printf(m, " #P:%d)\n", num_online_cpus());
3435 seq_puts(m, "# -----------------\n");
3436 seq_printf(m, "# | task: %.16s-%d "
3437 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3438 data->comm, data->pid,
3439 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3440 data->policy, data->rt_priority);
3441 seq_puts(m, "# -----------------\n");
3443 if (data->critical_start) {
3444 seq_puts(m, "# => started at: ");
3445 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3446 trace_print_seq(m, &iter->seq);
3447 seq_puts(m, "\n# => ended at: ");
3448 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3449 trace_print_seq(m, &iter->seq);
3450 seq_puts(m, "\n#\n");
3456 static void test_cpu_buff_start(struct trace_iterator *iter)
3458 struct trace_seq *s = &iter->seq;
3459 struct trace_array *tr = iter->tr;
3461 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3464 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3467 if (cpumask_available(iter->started) &&
3468 cpumask_test_cpu(iter->cpu, iter->started))
3471 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3474 if (cpumask_available(iter->started))
3475 cpumask_set_cpu(iter->cpu, iter->started);
3477 /* Don't print started cpu buffer for the first entry of the trace */
3479 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3483 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3485 struct trace_array *tr = iter->tr;
3486 struct trace_seq *s = &iter->seq;
3487 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3488 struct trace_entry *entry;
3489 struct trace_event *event;
3493 test_cpu_buff_start(iter);
3495 event = ftrace_find_event(entry->type);
3497 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3498 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3499 trace_print_lat_context(iter);
3501 trace_print_context(iter);
3504 if (trace_seq_has_overflowed(s))
3505 return TRACE_TYPE_PARTIAL_LINE;
3508 return event->funcs->trace(iter, sym_flags, event);
3510 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3512 return trace_handle_return(s);
3515 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3517 struct trace_array *tr = iter->tr;
3518 struct trace_seq *s = &iter->seq;
3519 struct trace_entry *entry;
3520 struct trace_event *event;
3524 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3525 trace_seq_printf(s, "%d %d %llu ",
3526 entry->pid, iter->cpu, iter->ts);
3528 if (trace_seq_has_overflowed(s))
3529 return TRACE_TYPE_PARTIAL_LINE;
3531 event = ftrace_find_event(entry->type);
3533 return event->funcs->raw(iter, 0, event);
3535 trace_seq_printf(s, "%d ?\n", entry->type);
3537 return trace_handle_return(s);
3540 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3542 struct trace_array *tr = iter->tr;
3543 struct trace_seq *s = &iter->seq;
3544 unsigned char newline = '\n';
3545 struct trace_entry *entry;
3546 struct trace_event *event;
3550 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3551 SEQ_PUT_HEX_FIELD(s, entry->pid);
3552 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3553 SEQ_PUT_HEX_FIELD(s, iter->ts);
3554 if (trace_seq_has_overflowed(s))
3555 return TRACE_TYPE_PARTIAL_LINE;
3558 event = ftrace_find_event(entry->type);
3560 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3561 if (ret != TRACE_TYPE_HANDLED)
3565 SEQ_PUT_FIELD(s, newline);
3567 return trace_handle_return(s);
3570 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3572 struct trace_array *tr = iter->tr;
3573 struct trace_seq *s = &iter->seq;
3574 struct trace_entry *entry;
3575 struct trace_event *event;
3579 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3580 SEQ_PUT_FIELD(s, entry->pid);
3581 SEQ_PUT_FIELD(s, iter->cpu);
3582 SEQ_PUT_FIELD(s, iter->ts);
3583 if (trace_seq_has_overflowed(s))
3584 return TRACE_TYPE_PARTIAL_LINE;
3587 event = ftrace_find_event(entry->type);
3588 return event ? event->funcs->binary(iter, 0, event) :
3592 int trace_empty(struct trace_iterator *iter)
3594 struct ring_buffer_iter *buf_iter;
3597 /* If we are looking at one CPU buffer, only check that one */
3598 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3599 cpu = iter->cpu_file;
3600 buf_iter = trace_buffer_iter(iter, cpu);
3602 if (!ring_buffer_iter_empty(buf_iter))
3605 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3611 for_each_tracing_cpu(cpu) {
3612 buf_iter = trace_buffer_iter(iter, cpu);
3614 if (!ring_buffer_iter_empty(buf_iter))
3617 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3625 /* Called with trace_event_read_lock() held. */
3626 enum print_line_t print_trace_line(struct trace_iterator *iter)
3628 struct trace_array *tr = iter->tr;
3629 unsigned long trace_flags = tr->trace_flags;
3630 enum print_line_t ret;
3632 if (iter->lost_events) {
3633 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3634 iter->cpu, iter->lost_events);
3635 if (trace_seq_has_overflowed(&iter->seq))
3636 return TRACE_TYPE_PARTIAL_LINE;
3639 if (iter->trace && iter->trace->print_line) {
3640 ret = iter->trace->print_line(iter);
3641 if (ret != TRACE_TYPE_UNHANDLED)
3645 if (iter->ent->type == TRACE_BPUTS &&
3646 trace_flags & TRACE_ITER_PRINTK &&
3647 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3648 return trace_print_bputs_msg_only(iter);
3650 if (iter->ent->type == TRACE_BPRINT &&
3651 trace_flags & TRACE_ITER_PRINTK &&
3652 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3653 return trace_print_bprintk_msg_only(iter);
3655 if (iter->ent->type == TRACE_PRINT &&
3656 trace_flags & TRACE_ITER_PRINTK &&
3657 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3658 return trace_print_printk_msg_only(iter);
3660 if (trace_flags & TRACE_ITER_BIN)
3661 return print_bin_fmt(iter);
3663 if (trace_flags & TRACE_ITER_HEX)
3664 return print_hex_fmt(iter);
3666 if (trace_flags & TRACE_ITER_RAW)
3667 return print_raw_fmt(iter);
3669 return print_trace_fmt(iter);
3672 void trace_latency_header(struct seq_file *m)
3674 struct trace_iterator *iter = m->private;
3675 struct trace_array *tr = iter->tr;
3677 /* print nothing if the buffers are empty */
3678 if (trace_empty(iter))
3681 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3682 print_trace_header(m, iter);
3684 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3685 print_lat_help_header(m);
3688 void trace_default_header(struct seq_file *m)
3690 struct trace_iterator *iter = m->private;
3691 struct trace_array *tr = iter->tr;
3692 unsigned long trace_flags = tr->trace_flags;
3694 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3697 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3698 /* print nothing if the buffers are empty */
3699 if (trace_empty(iter))
3701 print_trace_header(m, iter);
3702 if (!(trace_flags & TRACE_ITER_VERBOSE))
3703 print_lat_help_header(m);
3705 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3706 if (trace_flags & TRACE_ITER_IRQ_INFO)
3707 print_func_help_header_irq(iter->trace_buffer,
3710 print_func_help_header(iter->trace_buffer, m,
3716 static void test_ftrace_alive(struct seq_file *m)
3718 if (!ftrace_is_dead())
3720 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3721 "# MAY BE MISSING FUNCTION EVENTS\n");
3724 #ifdef CONFIG_TRACER_MAX_TRACE
3725 static void show_snapshot_main_help(struct seq_file *m)
3727 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3728 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3729 "# Takes a snapshot of the main buffer.\n"
3730 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3731 "# (Doesn't have to be '2' works with any number that\n"
3732 "# is not a '0' or '1')\n");
3735 static void show_snapshot_percpu_help(struct seq_file *m)
3737 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3738 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3739 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3740 "# Takes a snapshot of the main buffer for this cpu.\n");
3742 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3743 "# Must use main snapshot file to allocate.\n");
3745 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3746 "# (Doesn't have to be '2' works with any number that\n"
3747 "# is not a '0' or '1')\n");
3750 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3752 if (iter->tr->allocated_snapshot)
3753 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3755 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3757 seq_puts(m, "# Snapshot commands:\n");
3758 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3759 show_snapshot_main_help(m);
3761 show_snapshot_percpu_help(m);
3764 /* Should never be called */
3765 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3768 static int s_show(struct seq_file *m, void *v)
3770 struct trace_iterator *iter = v;
3773 if (iter->ent == NULL) {
3775 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3777 test_ftrace_alive(m);
3779 if (iter->snapshot && trace_empty(iter))
3780 print_snapshot_help(m, iter);
3781 else if (iter->trace && iter->trace->print_header)
3782 iter->trace->print_header(m);
3784 trace_default_header(m);
3786 } else if (iter->leftover) {
3788 * If we filled the seq_file buffer earlier, we
3789 * want to just show it now.
3791 ret = trace_print_seq(m, &iter->seq);
3793 /* ret should this time be zero, but you never know */
3794 iter->leftover = ret;
3797 print_trace_line(iter);
3798 ret = trace_print_seq(m, &iter->seq);
3800 * If we overflow the seq_file buffer, then it will
3801 * ask us for this data again at start up.
3803 * ret is 0 if seq_file write succeeded.
3806 iter->leftover = ret;
3813 * Should be used after trace_array_get(); trace_types_lock
3814 * ensures that i_cdev was already initialized.
3816 static inline int tracing_get_cpu(struct inode *inode)
3818 if (inode->i_cdev) /* See trace_create_cpu_file() */
3819 return (long)inode->i_cdev - 1;
3820 return RING_BUFFER_ALL_CPUS;
3823 static const struct seq_operations tracer_seq_ops = {
3830 static struct trace_iterator *
3831 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3833 struct trace_array *tr = inode->i_private;
3834 struct trace_iterator *iter;
3837 if (tracing_disabled)
3838 return ERR_PTR(-ENODEV);
3840 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3842 return ERR_PTR(-ENOMEM);
3844 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3846 if (!iter->buffer_iter)
3850 * We make a copy of the current tracer to avoid concurrent
3851 * changes on it while we are reading.
3853 mutex_lock(&trace_types_lock);
3854 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3858 *iter->trace = *tr->current_trace;
3860 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3865 #ifdef CONFIG_TRACER_MAX_TRACE
3866 /* Currently only the top directory has a snapshot */
3867 if (tr->current_trace->print_max || snapshot)
3868 iter->trace_buffer = &tr->max_buffer;
3871 iter->trace_buffer = &tr->trace_buffer;
3872 iter->snapshot = snapshot;
3874 iter->cpu_file = tracing_get_cpu(inode);
3875 mutex_init(&iter->mutex);
3877 /* Notify the tracer early, before we stop tracing. */
3878 if (iter->trace && iter->trace->open)
3879 iter->trace->open(iter);
3881 /* Annotate start of buffers if we had overruns */
3882 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3883 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3885 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3886 if (trace_clocks[tr->clock_id].in_ns)
3887 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3889 /* stop the trace while dumping if we are not opening "snapshot" */
3890 if (!iter->snapshot)
3891 tracing_stop_tr(tr);
3893 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3894 for_each_tracing_cpu(cpu) {
3895 iter->buffer_iter[cpu] =
3896 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3898 ring_buffer_read_prepare_sync();
3899 for_each_tracing_cpu(cpu) {
3900 ring_buffer_read_start(iter->buffer_iter[cpu]);
3901 tracing_iter_reset(iter, cpu);
3904 cpu = iter->cpu_file;
3905 iter->buffer_iter[cpu] =
3906 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3907 ring_buffer_read_prepare_sync();
3908 ring_buffer_read_start(iter->buffer_iter[cpu]);
3909 tracing_iter_reset(iter, cpu);
3912 mutex_unlock(&trace_types_lock);
3917 mutex_unlock(&trace_types_lock);
3919 kfree(iter->buffer_iter);
3921 seq_release_private(inode, file);
3922 return ERR_PTR(-ENOMEM);
3925 int tracing_open_generic(struct inode *inode, struct file *filp)
3927 if (tracing_disabled)
3930 filp->private_data = inode->i_private;
3934 bool tracing_is_disabled(void)
3936 return (tracing_disabled) ? true : false;
3940 * Open and update trace_array ref count.
3941 * Must have the current trace_array passed to it.
3943 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3945 struct trace_array *tr = inode->i_private;
3947 if (tracing_disabled)
3950 if (trace_array_get(tr) < 0)
3953 filp->private_data = inode->i_private;
3958 static int tracing_release(struct inode *inode, struct file *file)
3960 struct trace_array *tr = inode->i_private;
3961 struct seq_file *m = file->private_data;
3962 struct trace_iterator *iter;
3965 if (!(file->f_mode & FMODE_READ)) {
3966 trace_array_put(tr);
3970 /* Writes do not use seq_file */
3972 mutex_lock(&trace_types_lock);
3974 for_each_tracing_cpu(cpu) {
3975 if (iter->buffer_iter[cpu])
3976 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3979 if (iter->trace && iter->trace->close)
3980 iter->trace->close(iter);
3982 if (!iter->snapshot)
3983 /* reenable tracing if it was previously enabled */
3984 tracing_start_tr(tr);
3986 __trace_array_put(tr);
3988 mutex_unlock(&trace_types_lock);
3990 mutex_destroy(&iter->mutex);
3991 free_cpumask_var(iter->started);
3993 kfree(iter->buffer_iter);
3994 seq_release_private(inode, file);
3999 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4001 struct trace_array *tr = inode->i_private;
4003 trace_array_put(tr);
4007 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4009 struct trace_array *tr = inode->i_private;
4011 trace_array_put(tr);
4013 return single_release(inode, file);
4016 static int tracing_open(struct inode *inode, struct file *file)
4018 struct trace_array *tr = inode->i_private;
4019 struct trace_iterator *iter;
4022 if (trace_array_get(tr) < 0)
4025 /* If this file was open for write, then erase contents */
4026 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4027 int cpu = tracing_get_cpu(inode);
4028 struct trace_buffer *trace_buf = &tr->trace_buffer;
4030 #ifdef CONFIG_TRACER_MAX_TRACE
4031 if (tr->current_trace->print_max)
4032 trace_buf = &tr->max_buffer;
4035 if (cpu == RING_BUFFER_ALL_CPUS)
4036 tracing_reset_online_cpus(trace_buf);
4038 tracing_reset(trace_buf, cpu);
4041 if (file->f_mode & FMODE_READ) {
4042 iter = __tracing_open(inode, file, false);
4044 ret = PTR_ERR(iter);
4045 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4046 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4050 trace_array_put(tr);
4056 * Some tracers are not suitable for instance buffers.
4057 * A tracer is always available for the global array (toplevel)
4058 * or if it explicitly states that it is.
4061 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4063 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4066 /* Find the next tracer that this trace array may use */
4067 static struct tracer *
4068 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4070 while (t && !trace_ok_for_array(t, tr))
4077 t_next(struct seq_file *m, void *v, loff_t *pos)
4079 struct trace_array *tr = m->private;
4080 struct tracer *t = v;
4085 t = get_tracer_for_array(tr, t->next);
4090 static void *t_start(struct seq_file *m, loff_t *pos)
4092 struct trace_array *tr = m->private;
4096 mutex_lock(&trace_types_lock);
4098 t = get_tracer_for_array(tr, trace_types);
4099 for (; t && l < *pos; t = t_next(m, t, &l))
4105 static void t_stop(struct seq_file *m, void *p)
4107 mutex_unlock(&trace_types_lock);
4110 static int t_show(struct seq_file *m, void *v)
4112 struct tracer *t = v;
4117 seq_puts(m, t->name);
4126 static const struct seq_operations show_traces_seq_ops = {
4133 static int show_traces_open(struct inode *inode, struct file *file)
4135 struct trace_array *tr = inode->i_private;
4139 if (tracing_disabled)
4142 ret = seq_open(file, &show_traces_seq_ops);
4146 m = file->private_data;
4153 tracing_write_stub(struct file *filp, const char __user *ubuf,
4154 size_t count, loff_t *ppos)
4159 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4163 if (file->f_mode & FMODE_READ)
4164 ret = seq_lseek(file, offset, whence);
4166 file->f_pos = ret = 0;
4171 static const struct file_operations tracing_fops = {
4172 .open = tracing_open,
4174 .write = tracing_write_stub,
4175 .llseek = tracing_lseek,
4176 .release = tracing_release,
4179 static const struct file_operations show_traces_fops = {
4180 .open = show_traces_open,
4182 .release = seq_release,
4183 .llseek = seq_lseek,
4187 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4188 size_t count, loff_t *ppos)
4190 struct trace_array *tr = file_inode(filp)->i_private;
4194 len = snprintf(NULL, 0, "%*pb\n",
4195 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4196 mask_str = kmalloc(len, GFP_KERNEL);
4200 len = snprintf(mask_str, len, "%*pb\n",
4201 cpumask_pr_args(tr->tracing_cpumask));
4206 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4215 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4216 size_t count, loff_t *ppos)
4218 struct trace_array *tr = file_inode(filp)->i_private;
4219 cpumask_var_t tracing_cpumask_new;
4222 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4225 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4229 local_irq_disable();
4230 arch_spin_lock(&tr->max_lock);
4231 for_each_tracing_cpu(cpu) {
4233 * Increase/decrease the disabled counter if we are
4234 * about to flip a bit in the cpumask:
4236 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4237 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4238 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4239 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4241 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4242 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4243 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4244 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4247 arch_spin_unlock(&tr->max_lock);
4250 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4251 free_cpumask_var(tracing_cpumask_new);
4256 free_cpumask_var(tracing_cpumask_new);
4261 static const struct file_operations tracing_cpumask_fops = {
4262 .open = tracing_open_generic_tr,
4263 .read = tracing_cpumask_read,
4264 .write = tracing_cpumask_write,
4265 .release = tracing_release_generic_tr,
4266 .llseek = generic_file_llseek,
4269 static int tracing_trace_options_show(struct seq_file *m, void *v)
4271 struct tracer_opt *trace_opts;
4272 struct trace_array *tr = m->private;
4276 mutex_lock(&trace_types_lock);
4277 tracer_flags = tr->current_trace->flags->val;
4278 trace_opts = tr->current_trace->flags->opts;
4280 for (i = 0; trace_options[i]; i++) {
4281 if (tr->trace_flags & (1 << i))
4282 seq_printf(m, "%s\n", trace_options[i]);
4284 seq_printf(m, "no%s\n", trace_options[i]);
4287 for (i = 0; trace_opts[i].name; i++) {
4288 if (tracer_flags & trace_opts[i].bit)
4289 seq_printf(m, "%s\n", trace_opts[i].name);
4291 seq_printf(m, "no%s\n", trace_opts[i].name);
4293 mutex_unlock(&trace_types_lock);
4298 static int __set_tracer_option(struct trace_array *tr,
4299 struct tracer_flags *tracer_flags,
4300 struct tracer_opt *opts, int neg)
4302 struct tracer *trace = tracer_flags->trace;
4305 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4310 tracer_flags->val &= ~opts->bit;
4312 tracer_flags->val |= opts->bit;
4316 /* Try to assign a tracer specific option */
4317 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4319 struct tracer *trace = tr->current_trace;
4320 struct tracer_flags *tracer_flags = trace->flags;
4321 struct tracer_opt *opts = NULL;
4324 for (i = 0; tracer_flags->opts[i].name; i++) {
4325 opts = &tracer_flags->opts[i];
4327 if (strcmp(cmp, opts->name) == 0)
4328 return __set_tracer_option(tr, trace->flags, opts, neg);
4334 /* Some tracers require overwrite to stay enabled */
4335 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4337 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4343 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4345 /* do nothing if flag is already set */
4346 if (!!(tr->trace_flags & mask) == !!enabled)
4349 /* Give the tracer a chance to approve the change */
4350 if (tr->current_trace->flag_changed)
4351 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4355 tr->trace_flags |= mask;
4357 tr->trace_flags &= ~mask;
4359 if (mask == TRACE_ITER_RECORD_CMD)
4360 trace_event_enable_cmd_record(enabled);
4362 if (mask == TRACE_ITER_RECORD_TGID) {
4364 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4368 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4372 trace_event_enable_tgid_record(enabled);
4375 if (mask == TRACE_ITER_EVENT_FORK)
4376 trace_event_follow_fork(tr, enabled);
4378 if (mask == TRACE_ITER_FUNC_FORK)
4379 ftrace_pid_follow_fork(tr, enabled);
4381 if (mask == TRACE_ITER_OVERWRITE) {
4382 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4383 #ifdef CONFIG_TRACER_MAX_TRACE
4384 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4388 if (mask == TRACE_ITER_PRINTK) {
4389 trace_printk_start_stop_comm(enabled);
4390 trace_printk_control(enabled);
4396 static int trace_set_options(struct trace_array *tr, char *option)
4401 size_t orig_len = strlen(option);
4403 cmp = strstrip(option);
4405 if (strncmp(cmp, "no", 2) == 0) {
4410 mutex_lock(&trace_types_lock);
4412 ret = match_string(trace_options, -1, cmp);
4413 /* If no option could be set, test the specific tracer options */
4415 ret = set_tracer_option(tr, cmp, neg);
4417 ret = set_tracer_flag(tr, 1 << ret, !neg);
4419 mutex_unlock(&trace_types_lock);
4422 * If the first trailing whitespace is replaced with '\0' by strstrip,
4423 * turn it back into a space.
4425 if (orig_len > strlen(option))
4426 option[strlen(option)] = ' ';
4431 static void __init apply_trace_boot_options(void)
4433 char *buf = trace_boot_options_buf;
4437 option = strsep(&buf, ",");
4443 trace_set_options(&global_trace, option);
4445 /* Put back the comma to allow this to be called again */
4452 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4453 size_t cnt, loff_t *ppos)
4455 struct seq_file *m = filp->private_data;
4456 struct trace_array *tr = m->private;
4460 if (cnt >= sizeof(buf))
4463 if (copy_from_user(buf, ubuf, cnt))
4468 ret = trace_set_options(tr, buf);
4477 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4479 struct trace_array *tr = inode->i_private;
4482 if (tracing_disabled)
4485 if (trace_array_get(tr) < 0)
4488 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4490 trace_array_put(tr);
4495 static const struct file_operations tracing_iter_fops = {
4496 .open = tracing_trace_options_open,
4498 .llseek = seq_lseek,
4499 .release = tracing_single_release_tr,
4500 .write = tracing_trace_options_write,
4503 static const char readme_msg[] =
4504 "tracing mini-HOWTO:\n\n"
4505 "# echo 0 > tracing_on : quick way to disable tracing\n"
4506 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4507 " Important files:\n"
4508 " trace\t\t\t- The static contents of the buffer\n"
4509 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4510 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4511 " current_tracer\t- function and latency tracers\n"
4512 " available_tracers\t- list of configured tracers for current_tracer\n"
4513 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4514 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4515 " trace_clock\t\t-change the clock used to order events\n"
4516 " local: Per cpu clock but may not be synced across CPUs\n"
4517 " global: Synced across CPUs but slows tracing down.\n"
4518 " counter: Not a clock, but just an increment\n"
4519 " uptime: Jiffy counter from time of boot\n"
4520 " perf: Same clock that perf events use\n"
4521 #ifdef CONFIG_X86_64
4522 " x86-tsc: TSC cycle counter\n"
4524 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4525 " delta: Delta difference against a buffer-wide timestamp\n"
4526 " absolute: Absolute (standalone) timestamp\n"
4527 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4528 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4529 " tracing_cpumask\t- Limit which CPUs to trace\n"
4530 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4531 "\t\t\t Remove sub-buffer with rmdir\n"
4532 " trace_options\t\t- Set format or modify how tracing happens\n"
4533 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4534 "\t\t\t option name\n"
4535 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4536 #ifdef CONFIG_DYNAMIC_FTRACE
4537 "\n available_filter_functions - list of functions that can be filtered on\n"
4538 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4539 "\t\t\t functions\n"
4540 "\t accepts: func_full_name or glob-matching-pattern\n"
4541 "\t modules: Can select a group via module\n"
4542 "\t Format: :mod:<module-name>\n"
4543 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4544 "\t triggers: a command to perform when function is hit\n"
4545 "\t Format: <function>:<trigger>[:count]\n"
4546 "\t trigger: traceon, traceoff\n"
4547 "\t\t enable_event:<system>:<event>\n"
4548 "\t\t disable_event:<system>:<event>\n"
4549 #ifdef CONFIG_STACKTRACE
4552 #ifdef CONFIG_TRACER_SNAPSHOT
4557 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4558 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4559 "\t The first one will disable tracing every time do_fault is hit\n"
4560 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4561 "\t The first time do trap is hit and it disables tracing, the\n"
4562 "\t counter will decrement to 2. If tracing is already disabled,\n"
4563 "\t the counter will not decrement. It only decrements when the\n"
4564 "\t trigger did work\n"
4565 "\t To remove trigger without count:\n"
4566 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4567 "\t To remove trigger with a count:\n"
4568 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4569 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4570 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4571 "\t modules: Can select a group via module command :mod:\n"
4572 "\t Does not accept triggers\n"
4573 #endif /* CONFIG_DYNAMIC_FTRACE */
4574 #ifdef CONFIG_FUNCTION_TRACER
4575 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4578 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4579 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4580 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4581 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4583 #ifdef CONFIG_TRACER_SNAPSHOT
4584 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4585 "\t\t\t snapshot buffer. Read the contents for more\n"
4586 "\t\t\t information\n"
4588 #ifdef CONFIG_STACK_TRACER
4589 " stack_trace\t\t- Shows the max stack trace when active\n"
4590 " stack_max_size\t- Shows current max stack size that was traced\n"
4591 "\t\t\t Write into this file to reset the max size (trigger a\n"
4592 "\t\t\t new trace)\n"
4593 #ifdef CONFIG_DYNAMIC_FTRACE
4594 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4597 #endif /* CONFIG_STACK_TRACER */
4598 #ifdef CONFIG_KPROBE_EVENTS
4599 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4600 "\t\t\t Write into this file to define/undefine new trace events.\n"
4602 #ifdef CONFIG_UPROBE_EVENTS
4603 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4604 "\t\t\t Write into this file to define/undefine new trace events.\n"
4606 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4607 "\t accepts: event-definitions (one definition per line)\n"
4608 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4609 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4610 "\t -:[<group>/]<event>\n"
4611 #ifdef CONFIG_KPROBE_EVENTS
4612 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4613 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4615 #ifdef CONFIG_UPROBE_EVENTS
4616 "\t place: <path>:<offset>\n"
4618 "\t args: <name>=fetcharg[:type]\n"
4619 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4620 "\t $stack<index>, $stack, $retval, $comm\n"
4621 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4622 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4624 " events/\t\t- Directory containing all trace event subsystems:\n"
4625 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4626 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4627 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4629 " filter\t\t- If set, only events passing filter are traced\n"
4630 " events/<system>/<event>/\t- Directory containing control files for\n"
4632 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4633 " filter\t\t- If set, only events passing filter are traced\n"
4634 " trigger\t\t- If set, a command to perform when event is hit\n"
4635 "\t Format: <trigger>[:count][if <filter>]\n"
4636 "\t trigger: traceon, traceoff\n"
4637 "\t enable_event:<system>:<event>\n"
4638 "\t disable_event:<system>:<event>\n"
4639 #ifdef CONFIG_HIST_TRIGGERS
4640 "\t enable_hist:<system>:<event>\n"
4641 "\t disable_hist:<system>:<event>\n"
4643 #ifdef CONFIG_STACKTRACE
4646 #ifdef CONFIG_TRACER_SNAPSHOT
4649 #ifdef CONFIG_HIST_TRIGGERS
4650 "\t\t hist (see below)\n"
4652 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4653 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4654 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4655 "\t events/block/block_unplug/trigger\n"
4656 "\t The first disables tracing every time block_unplug is hit.\n"
4657 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4658 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4659 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4660 "\t Like function triggers, the counter is only decremented if it\n"
4661 "\t enabled or disabled tracing.\n"
4662 "\t To remove a trigger without a count:\n"
4663 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4664 "\t To remove a trigger with a count:\n"
4665 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4666 "\t Filters can be ignored when removing a trigger.\n"
4667 #ifdef CONFIG_HIST_TRIGGERS
4668 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4669 "\t Format: hist:keys=<field1[,field2,...]>\n"
4670 "\t [:values=<field1[,field2,...]>]\n"
4671 "\t [:sort=<field1[,field2,...]>]\n"
4672 "\t [:size=#entries]\n"
4673 "\t [:pause][:continue][:clear]\n"
4674 "\t [:name=histname1]\n"
4675 "\t [if <filter>]\n\n"
4676 "\t When a matching event is hit, an entry is added to a hash\n"
4677 "\t table using the key(s) and value(s) named, and the value of a\n"
4678 "\t sum called 'hitcount' is incremented. Keys and values\n"
4679 "\t correspond to fields in the event's format description. Keys\n"
4680 "\t can be any field, or the special string 'stacktrace'.\n"
4681 "\t Compound keys consisting of up to two fields can be specified\n"
4682 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4683 "\t fields. Sort keys consisting of up to two fields can be\n"
4684 "\t specified using the 'sort' keyword. The sort direction can\n"
4685 "\t be modified by appending '.descending' or '.ascending' to a\n"
4686 "\t sort field. The 'size' parameter can be used to specify more\n"
4687 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4688 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4689 "\t its histogram data will be shared with other triggers of the\n"
4690 "\t same name, and trigger hits will update this common data.\n\n"
4691 "\t Reading the 'hist' file for the event will dump the hash\n"
4692 "\t table in its entirety to stdout. If there are multiple hist\n"
4693 "\t triggers attached to an event, there will be a table for each\n"
4694 "\t trigger in the output. The table displayed for a named\n"
4695 "\t trigger will be the same as any other instance having the\n"
4696 "\t same name. The default format used to display a given field\n"
4697 "\t can be modified by appending any of the following modifiers\n"
4698 "\t to the field name, as applicable:\n\n"
4699 "\t .hex display a number as a hex value\n"
4700 "\t .sym display an address as a symbol\n"
4701 "\t .sym-offset display an address as a symbol and offset\n"
4702 "\t .execname display a common_pid as a program name\n"
4703 "\t .syscall display a syscall id as a syscall name\n"
4704 "\t .log2 display log2 value rather than raw number\n"
4705 "\t .usecs display a common_timestamp in microseconds\n\n"
4706 "\t The 'pause' parameter can be used to pause an existing hist\n"
4707 "\t trigger or to start a hist trigger but not log any events\n"
4708 "\t until told to do so. 'continue' can be used to start or\n"
4709 "\t restart a paused hist trigger.\n\n"
4710 "\t The 'clear' parameter will clear the contents of a running\n"
4711 "\t hist trigger and leave its current paused/active state\n"
4713 "\t The enable_hist and disable_hist triggers can be used to\n"
4714 "\t have one event conditionally start and stop another event's\n"
4715 "\t already-attached hist trigger. The syntax is analagous to\n"
4716 "\t the enable_event and disable_event triggers.\n"
4721 tracing_readme_read(struct file *filp, char __user *ubuf,
4722 size_t cnt, loff_t *ppos)
4724 return simple_read_from_buffer(ubuf, cnt, ppos,
4725 readme_msg, strlen(readme_msg));
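/*
 * An illustrative hist trigger matching the help text above (event and
 * field names are assumed to exist on the running kernel, mount point
 * assumed): aggregate kmalloc requests by call site, sorted by bytes
 * requested.
 *
 *	echo 'hist:keys=call_site.sym:values=bytes_req:sort=bytes_req.descending' \
 *		> events/kmem/kmalloc/trigger
 *	cat events/kmem/kmalloc/hist
 */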
4728 static const struct file_operations tracing_readme_fops = {
4729 .open = tracing_open_generic,
4730 .read = tracing_readme_read,
4731 .llseek = generic_file_llseek,
4734 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4738 if (*pos || m->count)
4743 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4744 if (trace_find_tgid(*ptr))
4751 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4761 v = saved_tgids_next(m, v, &l);
4769 static void saved_tgids_stop(struct seq_file *m, void *v)
4773 static int saved_tgids_show(struct seq_file *m, void *v)
4775 int pid = (int *)v - tgid_map;
4777 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4781 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4782 .start = saved_tgids_start,
4783 .stop = saved_tgids_stop,
4784 .next = saved_tgids_next,
4785 .show = saved_tgids_show,
4788 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4790 if (tracing_disabled)
4793 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4797 static const struct file_operations tracing_saved_tgids_fops = {
4798 .open = tracing_saved_tgids_open,
4800 .llseek = seq_lseek,
4801 .release = seq_release,
4804 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4806 unsigned int *ptr = v;
4808 if (*pos || m->count)
4813 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4815 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4824 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4830 arch_spin_lock(&trace_cmdline_lock);
4832 v = &savedcmd->map_cmdline_to_pid[0];
4834 v = saved_cmdlines_next(m, v, &l);
4842 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4844 arch_spin_unlock(&trace_cmdline_lock);
4848 static int saved_cmdlines_show(struct seq_file *m, void *v)
4850 char buf[TASK_COMM_LEN];
4851 unsigned int *pid = v;
4853 __trace_find_cmdline(*pid, buf);
4854 seq_printf(m, "%d %s\n", *pid, buf);
4858 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4859 .start = saved_cmdlines_start,
4860 .next = saved_cmdlines_next,
4861 .stop = saved_cmdlines_stop,
4862 .show = saved_cmdlines_show,
4865 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4867 if (tracing_disabled)
4870 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4873 static const struct file_operations tracing_saved_cmdlines_fops = {
4874 .open = tracing_saved_cmdlines_open,
4876 .llseek = seq_lseek,
4877 .release = seq_release,
4881 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4882 size_t cnt, loff_t *ppos)
4887 arch_spin_lock(&trace_cmdline_lock);
4888 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4889 arch_spin_unlock(&trace_cmdline_lock);
4891 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4894 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4896 kfree(s->saved_cmdlines);
4897 kfree(s->map_cmdline_to_pid);
4901 static int tracing_resize_saved_cmdlines(unsigned int val)
4903 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4905 s = kmalloc(sizeof(*s), GFP_KERNEL);
4909 if (allocate_cmdlines_buffer(val, s) < 0) {
4914 arch_spin_lock(&trace_cmdline_lock);
4915 savedcmd_temp = savedcmd;
4917 arch_spin_unlock(&trace_cmdline_lock);
4918 free_saved_cmdlines_buffer(savedcmd_temp);
4924 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4925 size_t cnt, loff_t *ppos)
4930 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4934 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
4935 if (!val || val > PID_MAX_DEFAULT)
4938 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4947 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4948 .open = tracing_open_generic,
4949 .read = tracing_saved_cmdlines_size_read,
4950 .write = tracing_saved_cmdlines_size_write,
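/*
 * The fops above back the "saved_cmdlines_size" file. Illustrative usage
 * (mount point assumed): a larger cache lets more PIDs be resolved to
 * comms in the trace output.
 *
 *	echo 8192 > /sys/kernel/tracing/saved_cmdlines_size
 *	cat /sys/kernel/tracing/saved_cmdlines_size
 */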
4953 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4954 static union trace_eval_map_item *
4955 update_eval_map(union trace_eval_map_item *ptr)
4957 if (!ptr->map.eval_string) {
4958 if (ptr->tail.next) {
4959 ptr = ptr->tail.next;
4960 /* Set ptr to the next real item (skip head) */
4968 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4970 union trace_eval_map_item *ptr = v;
4973 * Paranoid! If ptr points to end, we don't want to increment past it.
4974 * This really should never happen.
4976 ptr = update_eval_map(ptr);
4977 if (WARN_ON_ONCE(!ptr))
4984 ptr = update_eval_map(ptr);
4989 static void *eval_map_start(struct seq_file *m, loff_t *pos)
4991 union trace_eval_map_item *v;
4994 mutex_lock(&trace_eval_mutex);
4996 v = trace_eval_maps;
5000 while (v && l < *pos) {
5001 v = eval_map_next(m, v, &l);
5007 static void eval_map_stop(struct seq_file *m, void *v)
5009 mutex_unlock(&trace_eval_mutex);
5012 static int eval_map_show(struct seq_file *m, void *v)
5014 union trace_eval_map_item *ptr = v;
5016 seq_printf(m, "%s %ld (%s)\n",
5017 ptr->map.eval_string, ptr->map.eval_value,
5023 static const struct seq_operations tracing_eval_map_seq_ops = {
5024 .start = eval_map_start,
5025 .next = eval_map_next,
5026 .stop = eval_map_stop,
5027 .show = eval_map_show,
5030 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5032 if (tracing_disabled)
5035 return seq_open(filp, &tracing_eval_map_seq_ops);
5038 static const struct file_operations tracing_eval_map_fops = {
5039 .open = tracing_eval_map_open,
5041 .llseek = seq_lseek,
5042 .release = seq_release,
5045 static inline union trace_eval_map_item *
5046 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5048 /* Return tail of array given the head */
5049 return ptr + ptr->head.length + 1;
5053 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5056 struct trace_eval_map **stop;
5057 struct trace_eval_map **map;
5058 union trace_eval_map_item *map_array;
5059 union trace_eval_map_item *ptr;
5064 * The trace_eval_maps contains the map plus a head and tail item,
5065 * where the head holds the module and length of array, and the
5066 * tail holds a pointer to the next list.
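 *
 * An illustrative sketch of the resulting layout (editor's example,
 * assuming two maps inserted for a single module):
 *
 *   map_array[0].head = { .mod = mod, .length = 2 }
 *   map_array[1].map  = *start[0]
 *   map_array[2].map  = *start[1]
 *   map_array[3].tail = { .next = NULL }  (set to the next module's
 *                                          array once one is appended)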
5068 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5070 pr_warn("Unable to allocate trace eval mapping\n");
5074 mutex_lock(&trace_eval_mutex);
5076 if (!trace_eval_maps)
5077 trace_eval_maps = map_array;
5079 ptr = trace_eval_maps;
5081 ptr = trace_eval_jmp_to_tail(ptr);
5082 if (!ptr->tail.next)
5084 ptr = ptr->tail.next;
5087 ptr->tail.next = map_array;
5089 map_array->head.mod = mod;
5090 map_array->head.length = len;
5093 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5094 map_array->map = **map;
5097 memset(map_array, 0, sizeof(*map_array));
5099 mutex_unlock(&trace_eval_mutex);
5102 static void trace_create_eval_file(struct dentry *d_tracer)
5104 trace_create_file("eval_map", 0444, d_tracer,
5105 NULL, &tracing_eval_map_fops);
5108 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5109 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5110 static inline void trace_insert_eval_map_file(struct module *mod,
5111 struct trace_eval_map **start, int len) { }
5112 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5114 static void trace_insert_eval_map(struct module *mod,
5115 struct trace_eval_map **start, int len)
5117 struct trace_eval_map **map;
5124 trace_event_eval_update(map, len);
5126 trace_insert_eval_map_file(mod, start, len);
5130 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5131 size_t cnt, loff_t *ppos)
5133 struct trace_array *tr = filp->private_data;
5134 char buf[MAX_TRACER_SIZE+2];
5137 mutex_lock(&trace_types_lock);
5138 r = sprintf(buf, "%s\n", tr->current_trace->name);
5139 mutex_unlock(&trace_types_lock);
5141 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5144 int tracer_init(struct tracer *t, struct trace_array *tr)
5146 tracing_reset_online_cpus(&tr->trace_buffer);
5150 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5154 for_each_tracing_cpu(cpu)
5155 per_cpu_ptr(buf->data, cpu)->entries = val;
5158 #ifdef CONFIG_TRACER_MAX_TRACE
5159 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5160 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5161 struct trace_buffer *size_buf, int cpu_id)
5165 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5166 for_each_tracing_cpu(cpu) {
5167 ret = ring_buffer_resize(trace_buf->buffer,
5168 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5171 per_cpu_ptr(trace_buf->data, cpu)->entries =
5172 per_cpu_ptr(size_buf->data, cpu)->entries;
5175 ret = ring_buffer_resize(trace_buf->buffer,
5176 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5178 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5179 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5184 #endif /* CONFIG_TRACER_MAX_TRACE */
5186 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5187 unsigned long size, int cpu)
5192 * If the kernel or the user changes the size of the ring buffer,
5193 * we use the size that was given, and we can forget about
5194 * expanding it later.
5196 ring_buffer_expanded = true;
5198 /* May be called before buffers are initialized */
5199 if (!tr->trace_buffer.buffer)
5202 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5206 #ifdef CONFIG_TRACER_MAX_TRACE
5207 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5208 !tr->current_trace->use_max_tr)
5211 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5213 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5214 &tr->trace_buffer, cpu);
5217 * AARGH! We are left with a different
5218 * size max buffer!!!!
5219 * The max buffer is our "snapshot" buffer.
5220 * When a tracer needs a snapshot (one of the
5221 * latency tracers), it swaps the max buffer
5222 * with the saved snapshot. We succeeded in
5223 * updating the size of the main buffer, but failed
5224 * to update the size of the max buffer. Then, when we
5225 * tried to reset the main buffer to the original size,
5226 * we failed there too. This is very unlikely to
5227 * happen, but if it does, warn and kill all
5228 * tracing.
5231 tracing_disabled = 1;
5236 if (cpu == RING_BUFFER_ALL_CPUS)
5237 set_buffer_entries(&tr->max_buffer, size);
5239 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5242 #endif /* CONFIG_TRACER_MAX_TRACE */
5244 if (cpu == RING_BUFFER_ALL_CPUS)
5245 set_buffer_entries(&tr->trace_buffer, size);
5247 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5252 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5253 unsigned long size, int cpu_id)
5257 mutex_lock(&trace_types_lock);
5259 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5260 /* make sure this cpu is enabled in the mask */
5261 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5267 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5272 mutex_unlock(&trace_types_lock);
5279 * tracing_update_buffers - used by tracing facility to expand ring buffers
5281 * To save memory, the ring buffers are set to a minimum size when
5282 * tracing is configured in but never used. Once a user starts to use
5283 * the tracing facility, the buffers need to grow to their
5284 * default size.
5286 * This function is to be called when a tracer is about to be used.
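 *
 * A minimal sketch of the expected calling pattern (illustrative only,
 * not a specific call site in this file):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;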
5288 int tracing_update_buffers(void)
5292 mutex_lock(&trace_types_lock);
5293 if (!ring_buffer_expanded)
5294 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5295 RING_BUFFER_ALL_CPUS);
5296 mutex_unlock(&trace_types_lock);
5301 struct trace_option_dentry;
5304 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5307 * Used to clear out the tracer before deletion of an instance.
5308 * Must have trace_types_lock held.
5310 static void tracing_set_nop(struct trace_array *tr)
5312 if (tr->current_trace == &nop_trace)
5315 tr->current_trace->enabled--;
5317 if (tr->current_trace->reset)
5318 tr->current_trace->reset(tr);
5320 tr->current_trace = &nop_trace;
5323 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5325 /* Only enable if the directory has been created already. */
5329 create_trace_option_files(tr, t);
5332 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5335 #ifdef CONFIG_TRACER_MAX_TRACE
5340 mutex_lock(&trace_types_lock);
5342 if (!ring_buffer_expanded) {
5343 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5344 RING_BUFFER_ALL_CPUS);
5350 for (t = trace_types; t; t = t->next) {
5351 if (strcmp(t->name, buf) == 0)
5358 if (t == tr->current_trace)
5361 /* Some tracers won't work on kernel command line */
5362 if (system_state < SYSTEM_RUNNING && t->noboot) {
5363 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5368 /* Some tracers are only allowed for the top level buffer */
5369 if (!trace_ok_for_array(t, tr)) {
5374 /* If trace pipe files are being read, we can't change the tracer */
5375 if (tr->current_trace->ref) {
5380 trace_branch_disable();
5382 tr->current_trace->enabled--;
5384 if (tr->current_trace->reset)
5385 tr->current_trace->reset(tr);
5387 /* Current trace needs to be nop_trace before synchronize_sched */
5388 tr->current_trace = &nop_trace;
5390 #ifdef CONFIG_TRACER_MAX_TRACE
5391 had_max_tr = tr->allocated_snapshot;
5393 if (had_max_tr && !t->use_max_tr) {
5395 * We need to make sure that the update_max_tr sees that
5396 * current_trace changed to nop_trace to keep it from
5397 * swapping the buffers after we resize it.
5398 * The update_max_tr is called with interrupts disabled,
5399 * so a synchronize_sched() is sufficient.
5401 synchronize_sched();
5406 #ifdef CONFIG_TRACER_MAX_TRACE
5407 if (t->use_max_tr && !had_max_tr) {
5408 ret = tracing_alloc_snapshot_instance(tr);
5415 ret = tracer_init(t, tr);
5420 tr->current_trace = t;
5421 tr->current_trace->enabled++;
5422 trace_branch_enable(tr);
5424 mutex_unlock(&trace_types_lock);
5430 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5431 size_t cnt, loff_t *ppos)
5433 struct trace_array *tr = filp->private_data;
5434 char buf[MAX_TRACER_SIZE+1];
5441 if (cnt > MAX_TRACER_SIZE)
5442 cnt = MAX_TRACER_SIZE;
5444 if (copy_from_user(buf, ubuf, cnt))
5449 /* strip ending whitespace. */
5450 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5453 err = tracing_set_tracer(tr, buf);
5463 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5464 size_t cnt, loff_t *ppos)
5469 r = snprintf(buf, sizeof(buf), "%ld\n",
5470 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5471 if (r > sizeof(buf))
5473 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5477 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5478 size_t cnt, loff_t *ppos)
5483 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5493 tracing_thresh_read(struct file *filp, char __user *ubuf,
5494 size_t cnt, loff_t *ppos)
5496 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5500 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5501 size_t cnt, loff_t *ppos)
5503 struct trace_array *tr = filp->private_data;
5506 mutex_lock(&trace_types_lock);
5507 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5511 if (tr->current_trace->update_thresh) {
5512 ret = tr->current_trace->update_thresh(tr);
5519 mutex_unlock(&trace_types_lock);
5524 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5527 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5528 size_t cnt, loff_t *ppos)
5530 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5534 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5535 size_t cnt, loff_t *ppos)
5537 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5542 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5544 struct trace_array *tr = inode->i_private;
5545 struct trace_iterator *iter;
5548 if (tracing_disabled)
5551 if (trace_array_get(tr) < 0)
5554 mutex_lock(&trace_types_lock);
5556 /* create a buffer to store the information to pass to userspace */
5557 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5560 __trace_array_put(tr);
5564 trace_seq_init(&iter->seq);
5565 iter->trace = tr->current_trace;
5567 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5572 /* trace pipe does not show start of buffer */
5573 cpumask_setall(iter->started);
5575 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5576 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5578 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5579 if (trace_clocks[tr->clock_id].in_ns)
5580 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5583 iter->trace_buffer = &tr->trace_buffer;
5584 iter->cpu_file = tracing_get_cpu(inode);
5585 mutex_init(&iter->mutex);
5586 filp->private_data = iter;
5588 if (iter->trace->pipe_open)
5589 iter->trace->pipe_open(iter);
5591 nonseekable_open(inode, filp);
5593 tr->current_trace->ref++;
5595 mutex_unlock(&trace_types_lock);
5601 __trace_array_put(tr);
5602 mutex_unlock(&trace_types_lock);
5606 static int tracing_release_pipe(struct inode *inode, struct file *file)
5608 struct trace_iterator *iter = file->private_data;
5609 struct trace_array *tr = inode->i_private;
5611 mutex_lock(&trace_types_lock);
5613 tr->current_trace->ref--;
5615 if (iter->trace->pipe_close)
5616 iter->trace->pipe_close(iter);
5618 mutex_unlock(&trace_types_lock);
5620 free_cpumask_var(iter->started);
5621 mutex_destroy(&iter->mutex);
5624 trace_array_put(tr);
5630 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5632 struct trace_array *tr = iter->tr;
5634 /* Iterators are static, they should be filled or empty */
5635 if (trace_buffer_iter(iter, iter->cpu_file))
5636 return EPOLLIN | EPOLLRDNORM;
5638 if (tr->trace_flags & TRACE_ITER_BLOCK)
5640 * Always select as readable when in blocking mode
5642 return EPOLLIN | EPOLLRDNORM;
5644 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5649 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5651 struct trace_iterator *iter = filp->private_data;
5653 return trace_poll(iter, filp, poll_table);
5656 /* Must be called with iter->mutex held. */
5657 static int tracing_wait_pipe(struct file *filp)
5659 struct trace_iterator *iter = filp->private_data;
5662 while (trace_empty(iter)) {
5664 if ((filp->f_flags & O_NONBLOCK)) {
5669 * We block until we read something and tracing is disabled.
5670 * We still block if tracing is disabled, but we have never
5671 * read anything. This allows a user to cat this file, and
5672 * then enable tracing. But after we have read something,
5673 * we give an EOF when tracing is again disabled.
5675 * iter->pos will be 0 if we haven't read anything.
5677 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5680 mutex_unlock(&iter->mutex);
5682 ret = wait_on_pipe(iter, false);
5684 mutex_lock(&iter->mutex);
5697 tracing_read_pipe(struct file *filp, char __user *ubuf,
5698 size_t cnt, loff_t *ppos)
5700 struct trace_iterator *iter = filp->private_data;
5704 * Avoid more than one consumer on a single file descriptor
5705 * This is just a matter of traces coherency, the ring buffer itself
5708 mutex_lock(&iter->mutex);
5710 /* return any leftover data */
5711 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5715 trace_seq_init(&iter->seq);
5717 if (iter->trace->read) {
5718 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5724 sret = tracing_wait_pipe(filp);
5728 /* stop when tracing is finished */
5729 if (trace_empty(iter)) {
5734 if (cnt >= PAGE_SIZE)
5735 cnt = PAGE_SIZE - 1;
5737 /* reset all but tr, trace, and overruns */
5738 memset(&iter->seq, 0,
5739 sizeof(struct trace_iterator) -
5740 offsetof(struct trace_iterator, seq));
5741 cpumask_clear(iter->started);
5744 trace_event_read_lock();
5745 trace_access_lock(iter->cpu_file);
5746 while (trace_find_next_entry_inc(iter) != NULL) {
5747 enum print_line_t ret;
5748 int save_len = iter->seq.seq.len;
5750 ret = print_trace_line(iter);
5751 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5752 /* don't print partial lines */
5753 iter->seq.seq.len = save_len;
5756 if (ret != TRACE_TYPE_NO_CONSUME)
5757 trace_consume(iter);
5759 if (trace_seq_used(&iter->seq) >= cnt)
5763 * Setting the full flag means we reached the trace_seq buffer
5764 * size and we should have left via the partial-output condition above.
5765 * One of the trace_seq_* functions is not being used properly.
5767 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5770 trace_access_unlock(iter->cpu_file);
5771 trace_event_read_unlock();
5773 /* Now copy what we have to the user */
5774 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5775 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5776 trace_seq_init(&iter->seq);
5779 * If there was nothing to send to user, in spite of consuming trace
5780 * entries, go back to wait for more entries.
5786 mutex_unlock(&iter->mutex);
5791 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5794 __free_page(spd->pages[idx]);
5797 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5799 .confirm = generic_pipe_buf_confirm,
5800 .release = generic_pipe_buf_release,
5801 .steal = generic_pipe_buf_steal,
5802 .get = generic_pipe_buf_get,
5806 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5812 /* Seq buffer is page-sized, exactly what we need. */
5814 save_len = iter->seq.seq.len;
5815 ret = print_trace_line(iter);
5817 if (trace_seq_has_overflowed(&iter->seq)) {
5818 iter->seq.seq.len = save_len;
5823 * This should not be hit, because it should only
5824 * be set if the iter->seq overflowed. But check it
5825 * anyway to be safe.
5827 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5828 iter->seq.seq.len = save_len;
5832 count = trace_seq_used(&iter->seq) - save_len;
5835 iter->seq.seq.len = save_len;
5839 if (ret != TRACE_TYPE_NO_CONSUME)
5840 trace_consume(iter);
5842 if (!trace_find_next_entry_inc(iter)) {
5852 static ssize_t tracing_splice_read_pipe(struct file *filp,
5854 struct pipe_inode_info *pipe,
5858 struct page *pages_def[PIPE_DEF_BUFFERS];
5859 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5860 struct trace_iterator *iter = filp->private_data;
5861 struct splice_pipe_desc spd = {
5863 .partial = partial_def,
5864 .nr_pages = 0, /* This gets updated below. */
5865 .nr_pages_max = PIPE_DEF_BUFFERS,
5866 .ops = &tracing_pipe_buf_ops,
5867 .spd_release = tracing_spd_release_pipe,
5873 if (splice_grow_spd(pipe, &spd))
5876 mutex_lock(&iter->mutex);
5878 if (iter->trace->splice_read) {
5879 ret = iter->trace->splice_read(iter, filp,
5880 ppos, pipe, len, flags);
5885 ret = tracing_wait_pipe(filp);
5889 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5894 trace_event_read_lock();
5895 trace_access_lock(iter->cpu_file);
5897 /* Fill as many pages as possible. */
5898 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5899 spd.pages[i] = alloc_page(GFP_KERNEL);
5903 rem = tracing_fill_pipe_page(rem, iter);
5905 /* Copy the data into the page, so we can start over. */
5906 ret = trace_seq_to_buffer(&iter->seq,
5907 page_address(spd.pages[i]),
5908 trace_seq_used(&iter->seq));
5910 __free_page(spd.pages[i]);
5913 spd.partial[i].offset = 0;
5914 spd.partial[i].len = trace_seq_used(&iter->seq);
5916 trace_seq_init(&iter->seq);
5919 trace_access_unlock(iter->cpu_file);
5920 trace_event_read_unlock();
5921 mutex_unlock(&iter->mutex);
5926 ret = splice_to_pipe(pipe, &spd);
5930 splice_shrink_spd(&spd);
5934 mutex_unlock(&iter->mutex);
5939 tracing_entries_read(struct file *filp, char __user *ubuf,
5940 size_t cnt, loff_t *ppos)
5942 struct inode *inode = file_inode(filp);
5943 struct trace_array *tr = inode->i_private;
5944 int cpu = tracing_get_cpu(inode);
5949 mutex_lock(&trace_types_lock);
5951 if (cpu == RING_BUFFER_ALL_CPUS) {
5952 int cpu, buf_size_same;
5957 /* check if all cpu sizes are same */
5958 for_each_tracing_cpu(cpu) {
5959 /* fill in the size from first enabled cpu */
5961 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5962 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5968 if (buf_size_same) {
5969 if (!ring_buffer_expanded)
5970 r = sprintf(buf, "%lu (expanded: %lu)\n",
5972 trace_buf_size >> 10);
5974 r = sprintf(buf, "%lu\n", size >> 10);
5976 r = sprintf(buf, "X\n");
5978 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5980 mutex_unlock(&trace_types_lock);
5982 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5987 tracing_entries_write(struct file *filp, const char __user *ubuf,
5988 size_t cnt, loff_t *ppos)
5990 struct inode *inode = file_inode(filp);
5991 struct trace_array *tr = inode->i_private;
5995 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5999 /* must have at least 1 entry */
6003 /* value is in KB */
6004 val <<= 10;
6005 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6015 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6016 size_t cnt, loff_t *ppos)
6018 struct trace_array *tr = filp->private_data;
6021 unsigned long size = 0, expanded_size = 0;
6023 mutex_lock(&trace_types_lock);
6024 for_each_tracing_cpu(cpu) {
6025 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6026 if (!ring_buffer_expanded)
6027 expanded_size += trace_buf_size >> 10;
6029 if (ring_buffer_expanded)
6030 r = sprintf(buf, "%lu\n", size);
6032 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6033 mutex_unlock(&trace_types_lock);
6035 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6039 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6040 size_t cnt, loff_t *ppos)
6043 * There is no need to read what the user has written; this function
6044 * just makes sure that there is no error when "echo" is used
6053 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6055 struct trace_array *tr = inode->i_private;
6057 /* disable tracing ? */
6058 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6059 tracer_tracing_off(tr);
6060 /* resize the ring buffer to 0 */
6061 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6063 trace_array_put(tr);
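/*
 * tracing_mark_write() below backs the trace_marker file. Illustrative
 * userspace usage (mount point assumed to be /sys/kernel/tracing):
 *
 *	echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * The string is recorded as a print entry at the time of the write.
 */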
6069 tracing_mark_write(struct file *filp, const char __user *ubuf,
6070 size_t cnt, loff_t *fpos)
6072 struct trace_array *tr = filp->private_data;
6073 struct ring_buffer_event *event;
6074 enum event_trigger_type tt = ETT_NONE;
6075 struct ring_buffer *buffer;
6076 struct print_entry *entry;
6077 unsigned long irq_flags;
6078 const char faulted[] = "<faulted>";
6083 /* Used in tracing_mark_raw_write() as well */
6084 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6086 if (tracing_disabled)
6089 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6092 if (cnt > TRACE_BUF_SIZE)
6093 cnt = TRACE_BUF_SIZE;
6095 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6097 local_save_flags(irq_flags);
6098 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6100 /* If less than "<faulted>", then make sure we can still add that */
6101 if (cnt < FAULTED_SIZE)
6102 size += FAULTED_SIZE - cnt;
6104 buffer = tr->trace_buffer.buffer;
6105 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6106 irq_flags, preempt_count());
6107 if (unlikely(!event))
6108 /* Ring buffer disabled, return as if not open for write */
6111 entry = ring_buffer_event_data(event);
6112 entry->ip = _THIS_IP_;
6114 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6116 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6123 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6124 /* do not add \n before testing triggers, but add \0 */
6125 entry->buf[cnt] = '\0';
6126 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6129 if (entry->buf[cnt - 1] != '\n') {
6130 entry->buf[cnt] = '\n';
6131 entry->buf[cnt + 1] = '\0';
6133 entry->buf[cnt] = '\0';
6135 __buffer_unlock_commit(buffer, event);
6138 event_triggers_post_call(tr->trace_marker_file, tt);
6146 /* Limit it for now to 3K (including tag) */
6147 #define RAW_DATA_MAX_SIZE (1024*3)
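/*
 * Illustrative userspace sketch (file path and values assumed, not part
 * of this code): the first sizeof(int) bytes written to trace_marker_raw
 * are taken as the tag id, the rest as opaque payload.
 *
 *	struct { unsigned int id; char data[8]; } rec = { .id = 1, .data = "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &rec, sizeof(rec));
 *		close(fd);
 *	}
 */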
6150 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6151 size_t cnt, loff_t *fpos)
6153 struct trace_array *tr = filp->private_data;
6154 struct ring_buffer_event *event;
6155 struct ring_buffer *buffer;
6156 struct raw_data_entry *entry;
6157 const char faulted[] = "<faulted>";
6158 unsigned long irq_flags;
6163 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6165 if (tracing_disabled)
6168 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6171 /* The marker must at least have a tag id */
6172 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6175 if (cnt > TRACE_BUF_SIZE)
6176 cnt = TRACE_BUF_SIZE;
6178 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6180 local_save_flags(irq_flags);
6181 size = sizeof(*entry) + cnt;
6182 if (cnt < FAULT_SIZE_ID)
6183 size += FAULT_SIZE_ID - cnt;
6185 buffer = tr->trace_buffer.buffer;
6186 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6187 irq_flags, preempt_count());
6189 /* Ring buffer disabled, return as if not open for write */
6192 entry = ring_buffer_event_data(event);
6194 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6197 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6202 __buffer_unlock_commit(buffer, event);
6210 static int tracing_clock_show(struct seq_file *m, void *v)
6212 struct trace_array *tr = m->private;
6215 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6217 "%s%s%s%s", i ? " " : "",
6218 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6219 i == tr->clock_id ? "]" : "");
6225 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6229 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6230 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6233 if (i == ARRAY_SIZE(trace_clocks))
6236 mutex_lock(&trace_types_lock);
6240 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6243 * New clock may not be consistent with the previous clock.
6244 * Reset the buffer so that it doesn't have incomparable timestamps.
6246 tracing_reset_online_cpus(&tr->trace_buffer);
6248 #ifdef CONFIG_TRACER_MAX_TRACE
6249 if (tr->max_buffer.buffer)
6250 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6251 tracing_reset_online_cpus(&tr->max_buffer);
6254 mutex_unlock(&trace_types_lock);
6259 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6260 size_t cnt, loff_t *fpos)
6262 struct seq_file *m = filp->private_data;
6263 struct trace_array *tr = m->private;
6265 const char *clockstr;
6268 if (cnt >= sizeof(buf))
6271 if (copy_from_user(buf, ubuf, cnt))
6276 clockstr = strstrip(buf);
6278 ret = tracing_set_clock(tr, clockstr);
6287 static int tracing_clock_open(struct inode *inode, struct file *file)
6289 struct trace_array *tr = inode->i_private;
6292 if (tracing_disabled)
6295 if (trace_array_get(tr))
6298 ret = single_open(file, tracing_clock_show, inode->i_private);
6300 trace_array_put(tr);
6305 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6307 struct trace_array *tr = m->private;
6309 mutex_lock(&trace_types_lock);
6311 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6312 seq_puts(m, "delta [absolute]\n");
6314 seq_puts(m, "[delta] absolute\n");
6316 mutex_unlock(&trace_types_lock);
6321 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6323 struct trace_array *tr = inode->i_private;
6326 if (tracing_disabled)
6329 if (trace_array_get(tr))
6332 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6334 trace_array_put(tr);
6339 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6343 mutex_lock(&trace_types_lock);
6345 if (abs && tr->time_stamp_abs_ref++)
6349 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6354 if (--tr->time_stamp_abs_ref)
6358 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6360 #ifdef CONFIG_TRACER_MAX_TRACE
6361 if (tr->max_buffer.buffer)
6362 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6365 mutex_unlock(&trace_types_lock);
6370 struct ftrace_buffer_info {
6371 struct trace_iterator iter;
6373 unsigned int spare_cpu;
6377 #ifdef CONFIG_TRACER_SNAPSHOT
6378 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6380 struct trace_array *tr = inode->i_private;
6381 struct trace_iterator *iter;
6385 if (trace_array_get(tr) < 0)
6388 if (file->f_mode & FMODE_READ) {
6389 iter = __tracing_open(inode, file, true);
6391 ret = PTR_ERR(iter);
6393 /* Writes still need the seq_file to hold the private data */
6395 m = kzalloc(sizeof(*m), GFP_KERNEL);
6398 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6406 iter->trace_buffer = &tr->max_buffer;
6407 iter->cpu_file = tracing_get_cpu(inode);
6409 file->private_data = m;
6413 trace_array_put(tr);
6419 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6422 struct seq_file *m = filp->private_data;
6423 struct trace_iterator *iter = m->private;
6424 struct trace_array *tr = iter->tr;
6428 ret = tracing_update_buffers();
6432 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6436 mutex_lock(&trace_types_lock);
6438 if (tr->current_trace->use_max_tr) {
6445 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6449 if (tr->allocated_snapshot)
6453 /* Only allow per-cpu swap if the ring buffer supports it */
6454 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6455 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6460 if (!tr->allocated_snapshot) {
6461 ret = tracing_alloc_snapshot_instance(tr);
6465 local_irq_disable();
6466 /* Now, we're going to swap */
6467 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6468 update_max_tr(tr, current, smp_processor_id());
6470 update_max_tr_single(tr, current, iter->cpu_file);
6474 if (tr->allocated_snapshot) {
6475 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6476 tracing_reset_online_cpus(&tr->max_buffer);
6478 tracing_reset(&tr->max_buffer, iter->cpu_file);
6488 mutex_unlock(&trace_types_lock);
6492 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6494 struct seq_file *m = file->private_data;
6497 ret = tracing_release(inode, file);
6499 if (file->f_mode & FMODE_READ)
6502 /* If write only, the seq_file is just a stub */
6510 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6511 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6512 size_t count, loff_t *ppos);
6513 static int tracing_buffers_release(struct inode *inode, struct file *file);
6514 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6515 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6517 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6519 struct ftrace_buffer_info *info;
6522 ret = tracing_buffers_open(inode, filp);
6526 info = filp->private_data;
6528 if (info->iter.trace->use_max_tr) {
6529 tracing_buffers_release(inode, filp);
6533 info->iter.snapshot = true;
6534 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6539 #endif /* CONFIG_TRACER_SNAPSHOT */
6542 static const struct file_operations tracing_thresh_fops = {
6543 .open = tracing_open_generic,
6544 .read = tracing_thresh_read,
6545 .write = tracing_thresh_write,
6546 .llseek = generic_file_llseek,
6549 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6550 static const struct file_operations tracing_max_lat_fops = {
6551 .open = tracing_open_generic,
6552 .read = tracing_max_lat_read,
6553 .write = tracing_max_lat_write,
6554 .llseek = generic_file_llseek,
6558 static const struct file_operations set_tracer_fops = {
6559 .open = tracing_open_generic,
6560 .read = tracing_set_trace_read,
6561 .write = tracing_set_trace_write,
6562 .llseek = generic_file_llseek,
6565 static const struct file_operations tracing_pipe_fops = {
6566 .open = tracing_open_pipe,
6567 .poll = tracing_poll_pipe,
6568 .read = tracing_read_pipe,
6569 .splice_read = tracing_splice_read_pipe,
6570 .release = tracing_release_pipe,
6571 .llseek = no_llseek,
6574 static const struct file_operations tracing_entries_fops = {
6575 .open = tracing_open_generic_tr,
6576 .read = tracing_entries_read,
6577 .write = tracing_entries_write,
6578 .llseek = generic_file_llseek,
6579 .release = tracing_release_generic_tr,
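/*
 * tracing_entries_fops above backs the "buffer_size_kb" files (per
 * instance and per cpu). Illustrative usage (mount point assumed);
 * the value is in kilobytes per CPU:
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	cat /sys/kernel/tracing/buffer_size_kb
 */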
6582 static const struct file_operations tracing_total_entries_fops = {
6583 .open = tracing_open_generic_tr,
6584 .read = tracing_total_entries_read,
6585 .llseek = generic_file_llseek,
6586 .release = tracing_release_generic_tr,
6589 static const struct file_operations tracing_free_buffer_fops = {
6590 .open = tracing_open_generic_tr,
6591 .write = tracing_free_buffer_write,
6592 .release = tracing_free_buffer_release,
6595 static const struct file_operations tracing_mark_fops = {
6596 .open = tracing_open_generic_tr,
6597 .write = tracing_mark_write,
6598 .llseek = generic_file_llseek,
6599 .release = tracing_release_generic_tr,
6602 static const struct file_operations tracing_mark_raw_fops = {
6603 .open = tracing_open_generic_tr,
6604 .write = tracing_mark_raw_write,
6605 .llseek = generic_file_llseek,
6606 .release = tracing_release_generic_tr,
6609 static const struct file_operations trace_clock_fops = {
6610 .open = tracing_clock_open,
6612 .llseek = seq_lseek,
6613 .release = tracing_single_release_tr,
6614 .write = tracing_clock_write,
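/*
 * trace_clock_fops above backs the "trace_clock" file. Illustrative usage
 * (mount point and clock name assumed to be available): switching clocks
 * resets the buffers, since timestamps from different clocks are not
 * comparable.
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 */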
6617 static const struct file_operations trace_time_stamp_mode_fops = {
6618 .open = tracing_time_stamp_mode_open,
6620 .llseek = seq_lseek,
6621 .release = tracing_single_release_tr,
6624 #ifdef CONFIG_TRACER_SNAPSHOT
6625 static const struct file_operations snapshot_fops = {
6626 .open = tracing_snapshot_open,
6628 .write = tracing_snapshot_write,
6629 .llseek = tracing_lseek,
6630 .release = tracing_snapshot_release,
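/*
 * snapshot_fops above backs the "snapshot" file. Illustrative usage
 * (mount point assumed): 1 takes a snapshot (allocating the spare buffer
 * if needed), 0 frees the spare buffer, 2 clears the snapshot contents
 * without freeing it.
 *
 *	echo 1 > /sys/kernel/tracing/snapshot
 *	cat /sys/kernel/tracing/snapshot
 */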
6633 static const struct file_operations snapshot_raw_fops = {
6634 .open = snapshot_raw_open,
6635 .read = tracing_buffers_read,
6636 .release = tracing_buffers_release,
6637 .splice_read = tracing_buffers_splice_read,
6638 .llseek = no_llseek,
6641 #endif /* CONFIG_TRACER_SNAPSHOT */
6643 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6645 struct trace_array *tr = inode->i_private;
6646 struct ftrace_buffer_info *info;
6649 if (tracing_disabled)
6652 if (trace_array_get(tr) < 0)
6655 info = kzalloc(sizeof(*info), GFP_KERNEL);
6657 trace_array_put(tr);
6661 mutex_lock(&trace_types_lock);
6664 info->iter.cpu_file = tracing_get_cpu(inode);
6665 info->iter.trace = tr->current_trace;
6666 info->iter.trace_buffer = &tr->trace_buffer;
6668 /* Force reading ring buffer for first read */
6669 info->read = (unsigned int)-1;
6671 filp->private_data = info;
6673 tr->current_trace->ref++;
6675 mutex_unlock(&trace_types_lock);
6677 ret = nonseekable_open(inode, filp);
6679 trace_array_put(tr);
6685 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6687 struct ftrace_buffer_info *info = filp->private_data;
6688 struct trace_iterator *iter = &info->iter;
6690 return trace_poll(iter, filp, poll_table);
6694 tracing_buffers_read(struct file *filp, char __user *ubuf,
6695 size_t count, loff_t *ppos)
6697 struct ftrace_buffer_info *info = filp->private_data;
6698 struct trace_iterator *iter = &info->iter;
6705 #ifdef CONFIG_TRACER_MAX_TRACE
6706 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6711 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6713 if (IS_ERR(info->spare)) {
6714 ret = PTR_ERR(info->spare);
6717 info->spare_cpu = iter->cpu_file;
6723 /* Do we have previous read data to read? */
6724 if (info->read < PAGE_SIZE)
6728 trace_access_lock(iter->cpu_file);
6729 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6733 trace_access_unlock(iter->cpu_file);
6736 if (trace_empty(iter)) {
6737 if ((filp->f_flags & O_NONBLOCK))
6740 ret = wait_on_pipe(iter, false);
6751 size = PAGE_SIZE - info->read;
6755 ret = copy_to_user(ubuf, info->spare + info->read, size);
6767 static int tracing_buffers_release(struct inode *inode, struct file *file)
6769 struct ftrace_buffer_info *info = file->private_data;
6770 struct trace_iterator *iter = &info->iter;
6772 mutex_lock(&trace_types_lock);
6774 iter->tr->current_trace->ref--;
6776 __trace_array_put(iter->tr);
6779 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6780 info->spare_cpu, info->spare);
6783 mutex_unlock(&trace_types_lock);
6789 struct ring_buffer *buffer;
6795 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6796 struct pipe_buffer *buf)
6798 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6803 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6808 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6809 struct pipe_buffer *buf)
6811 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6816 /* Pipe buffer operations for a buffer. */
6817 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6819 .confirm = generic_pipe_buf_confirm,
6820 .release = buffer_pipe_buf_release,
6821 .steal = generic_pipe_buf_steal,
6822 .get = buffer_pipe_buf_get,
6826 * Callback from splice_to_pipe(), called if we need to release some pages
6827 * at the end of the spd in case we errored out while filling the pipe.
6829 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6831 struct buffer_ref *ref =
6832 (struct buffer_ref *)spd->partial[i].private;
6837 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6839 spd->partial[i].private = 0;
6843 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6844 struct pipe_inode_info *pipe, size_t len,
6847 struct ftrace_buffer_info *info = file->private_data;
6848 struct trace_iterator *iter = &info->iter;
6849 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6850 struct page *pages_def[PIPE_DEF_BUFFERS];
6851 struct splice_pipe_desc spd = {
6853 .partial = partial_def,
6854 .nr_pages_max = PIPE_DEF_BUFFERS,
6855 .ops = &buffer_pipe_buf_ops,
6856 .spd_release = buffer_spd_release,
6858 struct buffer_ref *ref;
6862 #ifdef CONFIG_TRACER_MAX_TRACE
6863 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6867 if (*ppos & (PAGE_SIZE - 1))
6870 if (len & (PAGE_SIZE - 1)) {
6871 if (len < PAGE_SIZE)
6876 if (splice_grow_spd(pipe, &spd))
6880 trace_access_lock(iter->cpu_file);
6881 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6883 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6887 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6894 ref->buffer = iter->trace_buffer->buffer;
6895 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6896 if (IS_ERR(ref->page)) {
6897 ret = PTR_ERR(ref->page);
6902 ref->cpu = iter->cpu_file;
6904 r = ring_buffer_read_page(ref->buffer, &ref->page,
6905 len, iter->cpu_file, 1);
6907 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6913 page = virt_to_page(ref->page);
6915 spd.pages[i] = page;
6916 spd.partial[i].len = PAGE_SIZE;
6917 spd.partial[i].offset = 0;
6918 spd.partial[i].private = (unsigned long)ref;
6922 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6925 trace_access_unlock(iter->cpu_file);
6928 /* did we read anything? */
6929 if (!spd.nr_pages) {
6934 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6937 ret = wait_on_pipe(iter, true);
6944 ret = splice_to_pipe(pipe, &spd);
6946 splice_shrink_spd(&spd);
6951 static const struct file_operations tracing_buffers_fops = {
6952 .open = tracing_buffers_open,
6953 .read = tracing_buffers_read,
6954 .poll = tracing_buffers_poll,
6955 .release = tracing_buffers_release,
6956 .splice_read = tracing_buffers_splice_read,
6957 .llseek = no_llseek,
6961 tracing_stats_read(struct file *filp, char __user *ubuf,
6962 size_t count, loff_t *ppos)
6964 struct inode *inode = file_inode(filp);
6965 struct trace_array *tr = inode->i_private;
6966 struct trace_buffer *trace_buf = &tr->trace_buffer;
6967 int cpu = tracing_get_cpu(inode);
6968 struct trace_seq *s;
6970 unsigned long long t;
6971 unsigned long usec_rem;
6973 s = kmalloc(sizeof(*s), GFP_KERNEL);
6979 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6980 trace_seq_printf(s, "entries: %ld\n", cnt);
6982 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
6983 trace_seq_printf(s, "overrun: %ld\n", cnt);
6985 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
6986 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6988 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
6989 trace_seq_printf(s, "bytes: %ld\n", cnt);
6991 if (trace_clocks[tr->clock_id].in_ns) {
6992 /* local or global for trace_clock */
6993 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
6994 usec_rem = do_div(t, USEC_PER_SEC);
6995 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6998 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
6999 usec_rem = do_div(t, USEC_PER_SEC);
7000 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7002 /* counter or tsc mode for trace_clock */
7003 trace_seq_printf(s, "oldest event ts: %llu\n",
7004 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7006 trace_seq_printf(s, "now ts: %llu\n",
7007 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7010 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7011 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7013 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7014 trace_seq_printf(s, "read events: %ld\n", cnt);
7016 count = simple_read_from_buffer(ubuf, count, ppos,
7017 s->buffer, trace_seq_used(s));
7024 static const struct file_operations tracing_stats_fops = {
7025 .open = tracing_open_generic_tr,
7026 .read = tracing_stats_read,
7027 .llseek = generic_file_llseek,
7028 .release = tracing_release_generic_tr,
7031 #ifdef CONFIG_DYNAMIC_FTRACE
7034 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7035 size_t cnt, loff_t *ppos)
7037 unsigned long *p = filp->private_data;
7038 char buf[64]; /* Not too big for a shallow stack */
7041 r = scnprintf(buf, 63, "%ld", *p);
7044 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7047 static const struct file_operations tracing_dyn_info_fops = {
7048 .open = tracing_open_generic,
7049 .read = tracing_read_dyn_info,
7050 .llseek = generic_file_llseek,
7052 #endif /* CONFIG_DYNAMIC_FTRACE */
7054 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7056 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7057 struct trace_array *tr, struct ftrace_probe_ops *ops,
7060 tracing_snapshot_instance(tr);
7064 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7065 struct trace_array *tr, struct ftrace_probe_ops *ops,
7068 struct ftrace_func_mapper *mapper = data;
7072 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7082 tracing_snapshot_instance(tr);
7086 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7087 struct ftrace_probe_ops *ops, void *data)
7089 struct ftrace_func_mapper *mapper = data;
7092 seq_printf(m, "%ps:", (void *)ip);
7094 seq_puts(m, "snapshot");
7097 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7100 seq_printf(m, ":count=%ld\n", *count);
7102 seq_puts(m, ":unlimited\n");
7108 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7109 unsigned long ip, void *init_data, void **data)
7111 struct ftrace_func_mapper *mapper = *data;
7114 mapper = allocate_ftrace_func_mapper();
7120 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7124 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7125 unsigned long ip, void *data)
7127 struct ftrace_func_mapper *mapper = data;
7132 free_ftrace_func_mapper(mapper, NULL);
7136 ftrace_func_mapper_remove_ip(mapper, ip);
7139 static struct ftrace_probe_ops snapshot_probe_ops = {
7140 .func = ftrace_snapshot,
7141 .print = ftrace_snapshot_print,
7144 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7145 .func = ftrace_count_snapshot,
7146 .print = ftrace_snapshot_print,
7147 .init = ftrace_snapshot_init,
7148 .free = ftrace_snapshot_free,
7152 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7153 char *glob, char *cmd, char *param, int enable)
7155 struct ftrace_probe_ops *ops;
7156 void *count = (void *)-1;
7163 /* hash funcs only work with set_ftrace_filter */
7167 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7170 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7175 number = strsep(¶m, ":");
7177 if (!strlen(number))
7181 * We use the callback data field (which is a pointer)
7184 ret = kstrtoul(number, 0, (unsigned long *)&count);
7189 ret = tracing_alloc_snapshot_instance(tr);
7193 ret = register_ftrace_function_probe(glob, tr, ops, count);
7196 return ret < 0 ? ret : 0;
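/*
 * Illustrative usage of the "snapshot" function command registered below
 * (mount point assumed): take one snapshot the first time schedule() is
 * hit.
 *
 *	echo 'schedule:snapshot:1' > /sys/kernel/tracing/set_ftrace_filter
 *	cat /sys/kernel/tracing/snapshot
 */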
7199 static struct ftrace_func_command ftrace_snapshot_cmd = {
7201 .func = ftrace_trace_snapshot_callback,
7204 static __init int register_snapshot_cmd(void)
7206 return register_ftrace_command(&ftrace_snapshot_cmd);
7209 static inline __init int register_snapshot_cmd(void) { return 0; }
7210 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7212 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7214 if (WARN_ON(!tr->dir))
7215 return ERR_PTR(-ENODEV);
7217 /* Top directory uses NULL as the parent */
7218 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7221 /* All sub buffers have a descriptor */
7225 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7227 struct dentry *d_tracer;
7230 return tr->percpu_dir;
7232 d_tracer = tracing_get_dentry(tr);
7233 if (IS_ERR(d_tracer))
7236 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7238 WARN_ONCE(!tr->percpu_dir,
7239 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7241 return tr->percpu_dir;
7244 static struct dentry *
7245 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7246 void *data, long cpu, const struct file_operations *fops)
7248 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7250 if (ret) /* See tracing_get_cpu() */
7251 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7256 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7258 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7259 struct dentry *d_cpu;
7260 char cpu_dir[30]; /* 30 characters should be more than enough */
7265 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7266 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7268 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7272 /* per cpu trace_pipe */
7273 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7274 tr, cpu, &tracing_pipe_fops);
7277 trace_create_cpu_file("trace", 0644, d_cpu,
7278 tr, cpu, &tracing_fops);
7280 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7281 tr, cpu, &tracing_buffers_fops);
7283 trace_create_cpu_file("stats", 0444, d_cpu,
7284 tr, cpu, &tracing_stats_fops);
7286 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7287 tr, cpu, &tracing_entries_fops);
7289 #ifdef CONFIG_TRACER_SNAPSHOT
7290 trace_create_cpu_file("snapshot", 0644, d_cpu,
7291 tr, cpu, &snapshot_fops);
7293 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7294 tr, cpu, &snapshot_raw_fops);
7298 #ifdef CONFIG_FTRACE_SELFTEST
7299 /* Let selftest have access to static functions in this file */
7300 #include "trace_selftest.c"
7304 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7307 struct trace_option_dentry *topt = filp->private_data;
7310 if (topt->flags->val & topt->opt->bit)
7315 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7319 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7322 struct trace_option_dentry *topt = filp->private_data;
7326 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7330 if (val != 0 && val != 1)
7333 if (!!(topt->flags->val & topt->opt->bit) != val) {
7334 mutex_lock(&trace_types_lock);
7335 ret = __set_tracer_option(topt->tr, topt->flags,
7337 mutex_unlock(&trace_types_lock);
7348 static const struct file_operations trace_options_fops = {
7349 .open = tracing_open_generic,
7350 .read = trace_options_read,
7351 .write = trace_options_write,
7352 .llseek = generic_file_llseek,
7356 * In order to pass in both the trace_array descriptor as well as the index
7357 * to the flag that the trace option file represents, the trace_array
7358 * has a character array of trace_flags_index[], which holds the index
7359 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7360 * The address of this character array is passed to the flag option file
7361 * read/write callbacks.
7363 * In order to extract both the index and the trace_array descriptor,
7364 * get_tr_index() uses the following algorithm.
7366 *   idx = *ptr;
7368 * As the pointer itself contains the address of the index (remember
7369 * index[1] == 1), dereferencing ptr yields the index.
7371 * Then, to get the trace_array descriptor, we subtract that index
7372 * from the ptr, which gets us to the start of the index array itself:
7374 *   ptr - idx == &index[0]
7376 * Then a simple container_of() from that pointer gets us to the
7377 * trace_array descriptor.
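 *
 * A concrete (illustrative) instance: if data points at
 * tr->trace_flags_index[3], then *pindex == 3 and data - 3 is
 * &tr->trace_flags_index[0], from which container_of() recovers tr.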
7379 static void get_tr_index(void *data, struct trace_array **ptr,
7380 unsigned int *pindex)
7382 *pindex = *(unsigned char *)data;
7384 *ptr = container_of(data - *pindex, struct trace_array,
7389 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7392 void *tr_index = filp->private_data;
7393 struct trace_array *tr;
7397 get_tr_index(tr_index, &tr, &index);
7399 if (tr->trace_flags & (1 << index))
7404 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7408 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7411 void *tr_index = filp->private_data;
7412 struct trace_array *tr;
7417 get_tr_index(tr_index, &tr, &index);
7419 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7423 if (val != 0 && val != 1)
7426 mutex_lock(&trace_types_lock);
7427 ret = set_tracer_flag(tr, 1 << index, val);
7428 mutex_unlock(&trace_types_lock);
7438 static const struct file_operations trace_options_core_fops = {
7439 .open = tracing_open_generic,
7440 .read = trace_options_core_read,
7441 .write = trace_options_core_write,
7442 .llseek = generic_file_llseek,
7445 struct dentry *trace_create_file(const char *name,
7447 struct dentry *parent,
7449 const struct file_operations *fops)
7453 ret = tracefs_create_file(name, mode, parent, data, fops);
7455 pr_warn("Could not create tracefs '%s' entry\n", name);
7461 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7463 struct dentry *d_tracer;
7468 d_tracer = tracing_get_dentry(tr);
7469 if (IS_ERR(d_tracer))
7472 tr->options = tracefs_create_dir("options", d_tracer);
7474 pr_warn("Could not create tracefs directory 'options'\n");
7482 create_trace_option_file(struct trace_array *tr,
7483 struct trace_option_dentry *topt,
7484 struct tracer_flags *flags,
7485 struct tracer_opt *opt)
7487 struct dentry *t_options;
7489 t_options = trace_options_init_dentry(tr);
7493 topt->flags = flags;
7497 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7498 &trace_options_fops);
7503 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7505 struct trace_option_dentry *topts;
7506 struct trace_options *tr_topts;
7507 struct tracer_flags *flags;
7508 struct tracer_opt *opts;
7515 flags = tracer->flags;
7517 if (!flags || !flags->opts)
7521 * If this is an instance, only create flags for tracers
7522 * the instance may have.
7524 if (!trace_ok_for_array(tracer, tr))
7527 for (i = 0; i < tr->nr_topts; i++) {
7528 /* Make sure there are no duplicate flags. */
7529 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7535 for (cnt = 0; opts[cnt].name; cnt++)
7538 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7542 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7549 tr->topts = tr_topts;
7550 tr->topts[tr->nr_topts].tracer = tracer;
7551 tr->topts[tr->nr_topts].topts = topts;
7554 for (cnt = 0; opts[cnt].name; cnt++) {
7555 create_trace_option_file(tr, &topts[cnt], flags,
7557 WARN_ONCE(topts[cnt].entry == NULL,
7558 "Failed to create trace option: %s",
7563 static struct dentry *
7564 create_trace_option_core_file(struct trace_array *tr,
7565 const char *option, long index)
7567 struct dentry *t_options;
7569 t_options = trace_options_init_dentry(tr);
7573 return trace_create_file(option, 0644, t_options,
7574 (void *)&tr->trace_flags_index[index],
7575 &trace_options_core_fops);
7578 static void create_trace_options_dir(struct trace_array *tr)
7580 struct dentry *t_options;
7581 bool top_level = tr == &global_trace;
7584 t_options = trace_options_init_dentry(tr);
7588 for (i = 0; trace_options[i]; i++) {
7590 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7591 create_trace_option_core_file(tr, trace_options[i], i);
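/*
 * The result is one file per core flag under the instance's "options"
 * directory.  An illustrative shell session (assuming tracefs is mounted
 * at /sys/kernel/tracing; file names depend on the configured flags):
 *
 *	# cat /sys/kernel/tracing/options/overwrite
 *	1
 *	# echo 0 > /sys/kernel/tracing/options/overwrite
 */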
7596 rb_simple_read(struct file *filp, char __user *ubuf,
7597 size_t cnt, loff_t *ppos)
7599 struct trace_array *tr = filp->private_data;
7603 r = tracer_tracing_is_on(tr);
7604 r = sprintf(buf, "%d\n", r);
7606 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7610 rb_simple_write(struct file *filp, const char __user *ubuf,
7611 size_t cnt, loff_t *ppos)
7613 struct trace_array *tr = filp->private_data;
7614 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7618 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7623 mutex_lock(&trace_types_lock);
7625 tracer_tracing_on(tr);
7626 if (tr->current_trace->start)
7627 tr->current_trace->start(tr);
7629 tracer_tracing_off(tr);
7630 if (tr->current_trace->stop)
7631 tr->current_trace->stop(tr);
7633 mutex_unlock(&trace_types_lock);
7641 static const struct file_operations rb_simple_fops = {
7642 .open = tracing_open_generic_tr,
7643 .read = rb_simple_read,
7644 .write = rb_simple_write,
7645 .release = tracing_release_generic_tr,
7646 .llseek = default_llseek,
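/*
 * rb_simple_read()/rb_simple_write() back the per-instance "tracing_on"
 * file created in init_tracer_tracefs() below.  Illustrative usage,
 * assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on	# pause recording
 *	# cat /sys/kernel/tracing/tracing_on
 *	0
 *	# echo 1 > /sys/kernel/tracing/tracing_on	# resume
 *
 * The buffers stay allocated; only recording (plus the current tracer's
 * start/stop callbacks) is toggled.
 */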
7649 struct dentry *trace_instance_dir;
7652 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7655 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7657 enum ring_buffer_flags rb_flags;
7659 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7663 buf->buffer = ring_buffer_alloc(size, rb_flags);
7667 buf->data = alloc_percpu(struct trace_array_cpu);
7669 ring_buffer_free(buf->buffer);
7674 /* Allocate the first page for all buffers */
7675 set_buffer_entries(&tr->trace_buffer,
7676 ring_buffer_size(tr->trace_buffer.buffer, 0));
7681 static int allocate_trace_buffers(struct trace_array *tr, int size)
7685 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7689 #ifdef CONFIG_TRACER_MAX_TRACE
7690 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7691 allocate_snapshot ? size : 1);
7693 ring_buffer_free(tr->trace_buffer.buffer);
7694 tr->trace_buffer.buffer = NULL;
7695 free_percpu(tr->trace_buffer.data);
7696 tr->trace_buffer.data = NULL;
7699 tr->allocated_snapshot = allocate_snapshot;
7702 * Only the top level trace array gets its snapshot allocated
7703 * from the kernel command line.
7705 allocate_snapshot = false;
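/*
 * Summary of the buffer scheme above: every trace_array owns a main
 * trace_buffer and, with CONFIG_TRACER_MAX_TRACE, a max_buffer used for
 * snapshot/max-latency swaps.  The max_buffer is kept at a minimal size
 * unless a snapshot was requested, and only the top level array can have
 * its snapshot pre-allocated from the kernel command line.
 */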
7710 static void free_trace_buffer(struct trace_buffer *buf)
7713 ring_buffer_free(buf->buffer);
7715 free_percpu(buf->data);
7720 static void free_trace_buffers(struct trace_array *tr)
7725 free_trace_buffer(&tr->trace_buffer);
7727 #ifdef CONFIG_TRACER_MAX_TRACE
7728 free_trace_buffer(&tr->max_buffer);
7732 static void init_trace_flags_index(struct trace_array *tr)
7736 /* Used by the trace options files */
7737 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7738 tr->trace_flags_index[i] = i;
7741 static void __update_tracer_options(struct trace_array *tr)
7745 for (t = trace_types; t; t = t->next)
7746 add_tracer_options(tr, t);
7749 static void update_tracer_options(struct trace_array *tr)
7751 mutex_lock(&trace_types_lock);
7752 __update_tracer_options(tr);
7753 mutex_unlock(&trace_types_lock);
7756 static int instance_mkdir(const char *name)
7758 struct trace_array *tr;
7761 mutex_lock(&event_mutex);
7762 mutex_lock(&trace_types_lock);
7765 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7766 if (tr->name && strcmp(tr->name, name) == 0)
7771 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7775 tr->name = kstrdup(name, GFP_KERNEL);
7779 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7782 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7784 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7786 raw_spin_lock_init(&tr->start_lock);
7788 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7790 tr->current_trace = &nop_trace;
7792 INIT_LIST_HEAD(&tr->systems);
7793 INIT_LIST_HEAD(&tr->events);
7794 INIT_LIST_HEAD(&tr->hist_vars);
7796 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7799 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7803 ret = event_trace_add_tracer(tr->dir, tr);
7805 tracefs_remove_recursive(tr->dir);
7809 ftrace_init_trace_array(tr);
7811 init_tracer_tracefs(tr, tr->dir);
7812 init_trace_flags_index(tr);
7813 __update_tracer_options(tr);
7815 list_add(&tr->list, &ftrace_trace_arrays);
7817 mutex_unlock(&trace_types_lock);
7818 mutex_unlock(&event_mutex);
7823 free_trace_buffers(tr);
7824 free_cpumask_var(tr->tracing_cpumask);
7829 mutex_unlock(&trace_types_lock);
7830 mutex_unlock(&event_mutex);
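/*
 * A freshly created instance gets its own ring buffer and cpumask, starts
 * with the nop tracer, and inherits the top level trace flags (minus the
 * ZEROED_TRACE_FLAGS bits), so nothing is recorded in it until a tracer
 * or events are enabled.
 */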
7836 static int instance_rmdir(const char *name)
7838 struct trace_array *tr;
7843 mutex_lock(&event_mutex);
7844 mutex_lock(&trace_types_lock);
7847 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7848 if (tr->name && strcmp(tr->name, name) == 0) {
7857 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7860 list_del(&tr->list);
7862 /* Disable all the flags that were enabled coming in */
7863 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7864 if ((1 << i) & ZEROED_TRACE_FLAGS)
7865 set_tracer_flag(tr, 1 << i, 0);
7868 tracing_set_nop(tr);
7869 clear_ftrace_function_probes(tr);
7870 event_trace_del_tracer(tr);
7871 ftrace_clear_pids(tr);
7872 ftrace_destroy_function_files(tr);
7873 tracefs_remove_recursive(tr->dir);
7874 free_trace_buffers(tr);
7876 for (i = 0; i < tr->nr_topts; i++) {
7877 kfree(tr->topts[i].topts);
7881 free_cpumask_var(tr->tracing_cpumask);
7888 mutex_unlock(&trace_types_lock);
7889 mutex_unlock(&event_mutex);
7894 static __init void create_trace_instances(struct dentry *d_tracer)
7896 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7899 if (WARN_ON(!trace_instance_dir))
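/*
 * instance_mkdir()/instance_rmdir() above are wired up here as the
 * callbacks for the "instances" directory.  Illustrative usage, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *	# rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir is rejected while the instance or its current tracer is
 * still in use (see the ref checks in instance_rmdir()).
 */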
7904 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7906 struct trace_event_file *file;
7909 trace_create_file("available_tracers", 0444, d_tracer,
7910 tr, &show_traces_fops);
7912 trace_create_file("current_tracer", 0644, d_tracer,
7913 tr, &set_tracer_fops);
7915 trace_create_file("tracing_cpumask", 0644, d_tracer,
7916 tr, &tracing_cpumask_fops);
7918 trace_create_file("trace_options", 0644, d_tracer,
7919 tr, &tracing_iter_fops);
7921 trace_create_file("trace", 0644, d_tracer,
7924 trace_create_file("trace_pipe", 0444, d_tracer,
7925 tr, &tracing_pipe_fops);
7927 trace_create_file("buffer_size_kb", 0644, d_tracer,
7928 tr, &tracing_entries_fops);
7930 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7931 tr, &tracing_total_entries_fops);
7933 trace_create_file("free_buffer", 0200, d_tracer,
7934 tr, &tracing_free_buffer_fops);
7936 trace_create_file("trace_marker", 0220, d_tracer,
7937 tr, &tracing_mark_fops);
7939 file = __find_event_file(tr, "ftrace", "print");
7940 if (file && file->dir)
7941 trace_create_file("trigger", 0644, file->dir, file,
7942 &event_trigger_fops);
7943 tr->trace_marker_file = file;
7945 trace_create_file("trace_marker_raw", 0220, d_tracer,
7946 tr, &tracing_mark_raw_fops);
7948 trace_create_file("trace_clock", 0644, d_tracer, tr,
7951 trace_create_file("tracing_on", 0644, d_tracer,
7952 tr, &rb_simple_fops);
7954 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
7955 &trace_time_stamp_mode_fops);
7957 create_trace_options_dir(tr);
7959 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7960 trace_create_file("tracing_max_latency", 0644, d_tracer,
7961 &tr->max_latency, &tracing_max_lat_fops);
7964 if (ftrace_create_function_files(tr, d_tracer))
7965 WARN(1, "Could not allocate function filter files");
7967 #ifdef CONFIG_TRACER_SNAPSHOT
7968 trace_create_file("snapshot", 0644, d_tracer,
7969 tr, &snapshot_fops);
7972 for_each_tracing_cpu(cpu)
7973 tracing_init_tracefs_percpu(tr, cpu);
7975 ftrace_init_tracefs(tr, d_tracer);
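/*
 * Rough picture of what init_tracer_tracefs() creates in an instance
 * directory (illustrative, not exhaustive; some entries are
 * config-dependent):
 *
 *	available_tracers  current_tracer  trace        trace_pipe
 *	trace_marker       trace_clock     tracing_on   options/
 *	buffer_size_kb     per_cpu/cpu*/   snapshot     ...
 */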
7978 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
7980 struct vfsmount *mnt;
7981 struct file_system_type *type;
7984 * To maintain backward compatibility for tools that mount
7985 * debugfs to get to the tracing facility, tracefs is automatically
7986 * mounted to the debugfs/tracing directory.
7988 type = get_fs_type("tracefs");
7991 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
7992 put_filesystem(type);
8001 * tracing_init_dentry - initialize top level trace array
8003 * This is called when creating files or directories in the tracing
8004 * directory. It is called via fs_initcall() by the boot up code
8005 * and returns the dentry of the top level tracing directory.
8007 struct dentry *tracing_init_dentry(void)
8009 struct trace_array *tr = &global_trace;
8011 /* The top level trace array uses NULL as parent */
8015 if (WARN_ON(!tracefs_initialized()) ||
8016 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8017 WARN_ON(!debugfs_initialized())))
8018 return ERR_PTR(-ENODEV);
8021 * As there may still be users that expect the tracing
8022 * files to exist in debugfs/tracing, we must automount
8023 * the tracefs file system there, so older tools still
8024 * work with the newer kernel.
8026 tr->dir = debugfs_create_automount("tracing", NULL,
8027 trace_automount, NULL);
8029 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8030 return ERR_PTR(-ENOMEM);
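/*
 * A typical caller (sketch; "some_knob" and "some_fops" are placeholders,
 * both helpers are defined in this file):
 *
 *	struct dentry *d_tracer = tracing_init_dentry();
 *
 *	if (IS_ERR(d_tracer))
 *		return 0;
 *	trace_create_file("some_knob", 0644, d_tracer, NULL, &some_fops);
 *
 * tracer_init_tracefs() below follows this pattern.
 */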
8036 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8037 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8039 static void __init trace_eval_init(void)
8043 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8044 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8047 #ifdef CONFIG_MODULES
8048 static void trace_module_add_evals(struct module *mod)
8050 if (!mod->num_trace_evals)
8054 * Modules with bad taint do not have events created; do
8055 * not bother with their eval maps either.
8057 if (trace_module_has_bad_taint(mod))
8060 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8063 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8064 static void trace_module_remove_evals(struct module *mod)
8066 union trace_eval_map_item *map;
8067 union trace_eval_map_item **last = &trace_eval_maps;
8069 if (!mod->num_trace_evals)
8072 mutex_lock(&trace_eval_mutex);
8074 map = trace_eval_maps;
8077 if (map->head.mod == mod)
8079 map = trace_eval_jmp_to_tail(map);
8080 last = &map->tail.next;
8081 map = map->tail.next;
8086 *last = trace_eval_jmp_to_tail(map)->tail.next;
8089 mutex_unlock(&trace_eval_mutex);
8092 static inline void trace_module_remove_evals(struct module *mod) { }
8093 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8095 static int trace_module_notify(struct notifier_block *self,
8096 unsigned long val, void *data)
8098 struct module *mod = data;
8101 case MODULE_STATE_COMING:
8102 trace_module_add_evals(mod);
8104 case MODULE_STATE_GOING:
8105 trace_module_remove_evals(mod);
8112 static struct notifier_block trace_module_nb = {
8113 .notifier_call = trace_module_notify,
8116 #endif /* CONFIG_MODULES */
8118 static __init int tracer_init_tracefs(void)
8120 struct dentry *d_tracer;
8122 trace_access_lock_init();
8124 d_tracer = tracing_init_dentry();
8125 if (IS_ERR(d_tracer))
8130 init_tracer_tracefs(&global_trace, d_tracer);
8131 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8133 trace_create_file("tracing_thresh", 0644, d_tracer,
8134 &global_trace, &tracing_thresh_fops);
8136 trace_create_file("README", 0444, d_tracer,
8137 NULL, &tracing_readme_fops);
8139 trace_create_file("saved_cmdlines", 0444, d_tracer,
8140 NULL, &tracing_saved_cmdlines_fops);
8142 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8143 NULL, &tracing_saved_cmdlines_size_fops);
8145 trace_create_file("saved_tgids", 0444, d_tracer,
8146 NULL, &tracing_saved_tgids_fops);
8150 trace_create_eval_file(d_tracer);
8152 #ifdef CONFIG_MODULES
8153 register_module_notifier(&trace_module_nb);
8156 #ifdef CONFIG_DYNAMIC_FTRACE
8157 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8158 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8161 create_trace_instances(d_tracer);
8163 update_tracer_options(&global_trace);
8168 static int trace_panic_handler(struct notifier_block *this,
8169 unsigned long event, void *unused)
8171 if (ftrace_dump_on_oops)
8172 ftrace_dump(ftrace_dump_on_oops);
8176 static struct notifier_block trace_panic_notifier = {
8177 .notifier_call = trace_panic_handler,
8179 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8182 static int trace_die_handler(struct notifier_block *self,
8188 if (ftrace_dump_on_oops)
8189 ftrace_dump(ftrace_dump_on_oops);
8197 static struct notifier_block trace_die_notifier = {
8198 .notifier_call = trace_die_handler,
8203 * printk is capped at 1024 bytes, and we really don't need it that big:
8204 * nothing should be printing 1000 characters in a single line anyway.
8206 #define TRACE_MAX_PRINT 1000
8209 * Define here KERN_TRACE so that we have one place to modify
8210 * it if we decide to change what log level the ftrace dump
8213 #define KERN_TRACE KERN_EMERG
8216 trace_printk_seq(struct trace_seq *s)
8218 /* Probably should print a warning here. */
8219 if (s->seq.len >= TRACE_MAX_PRINT)
8220 s->seq.len = TRACE_MAX_PRINT;
8223 * More paranoid code. Although the buffer size is set to
8224 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8225 * an extra layer of protection.
8227 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8228 s->seq.len = s->seq.size - 1;
8230 /* Should already be NUL-terminated, but we are paranoid. */
8231 s->buffer[s->seq.len] = 0;
8233 printk(KERN_TRACE "%s", s->buffer);
8238 void trace_init_global_iter(struct trace_iterator *iter)
8240 iter->tr = &global_trace;
8241 iter->trace = iter->tr->current_trace;
8242 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8243 iter->trace_buffer = &global_trace.trace_buffer;
8245 if (iter->trace && iter->trace->open)
8246 iter->trace->open(iter);
8248 /* Annotate start of buffers if we had overruns */
8249 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8250 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8252 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8253 if (trace_clocks[iter->tr->clock_id].in_ns)
8254 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8257 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8259 /* use static because iter can be a bit big for the stack */
8260 static struct trace_iterator iter;
8261 static atomic_t dump_running;
8262 struct trace_array *tr = &global_trace;
8263 unsigned int old_userobj;
8264 unsigned long flags;
8267 /* Only allow one dump user at a time. */
8268 if (atomic_inc_return(&dump_running) != 1) {
8269 atomic_dec(&dump_running);
8274 * Always turn off tracing when we dump.
8275 * We don't need to show trace output of what happens
8276 * between multiple crashes.
8278 * If the user does a sysrq-z, then they can re-enable
8279 * tracing with echo 1 > tracing_on.
8283 local_irq_save(flags);
8285 /* Simulate the iterator */
8286 trace_init_global_iter(&iter);
8288 for_each_tracing_cpu(cpu) {
8289 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8292 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8294 /* don't look at user memory in panic mode */
8295 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8297 switch (oops_dump_mode) {
8299 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8302 iter.cpu_file = raw_smp_processor_id();
8307 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8308 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8311 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8313 /* Did function tracer already get disabled? */
8314 if (ftrace_is_dead()) {
8315 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8316 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8320 * We need to stop all tracing on all CPUs to read
8321 * the next buffer. This is a bit expensive, but is
8322 * not done often. We read all that we can,
8323 * and then release the locks again.
8326 while (!trace_empty(&iter)) {
8329 printk(KERN_TRACE "---------------------------------\n");
8333 /* reset all but tr, trace, and overruns */
8334 memset(&iter.seq, 0,
8335 sizeof(struct trace_iterator) -
8336 offsetof(struct trace_iterator, seq));
8337 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8340 if (trace_find_next_entry_inc(&iter) != NULL) {
8343 ret = print_trace_line(&iter);
8344 if (ret != TRACE_TYPE_NO_CONSUME)
8345 trace_consume(&iter);
8347 touch_nmi_watchdog();
8349 trace_printk_seq(&iter.seq);
8353 printk(KERN_TRACE " (ftrace buffer empty)\n");
8355 printk(KERN_TRACE "---------------------------------\n");
8358 tr->trace_flags |= old_userobj;
8360 for_each_tracing_cpu(cpu) {
8361 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8363 atomic_dec(&dump_running);
8364 local_irq_restore(flags);
8366 EXPORT_SYMBOL_GPL(ftrace_dump);
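/*
 * ftrace_dump() is normally reached from trace_panic_handler() /
 * trace_die_handler() above when ftrace_dump_on_oops is set, or when the
 * user triggers it (e.g. via sysrq-z).  Depending on the dump mode it
 * walks either every CPU's buffer or only the buffer of the CPU that
 * triggered the oops; an unknown mode falls back to dumping all CPUs.
 */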
8368 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8375 argv = argv_split(GFP_KERNEL, buf, &argc);
8380 ret = createfn(argc, argv);
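/*
 * A minimal (hypothetical) createfn callback, to illustrate the calling
 * convention used by trace_run_command()/trace_parse_run_command():
 *
 *	static int my_createfn(int argc, char **argv)
 *	{
 *		if (argc < 1)
 *			return -EINVAL;
 *		pr_info("cmd %s with %d argument(s)\n", argv[0], argc - 1);
 *		return 0;
 *	}
 *
 * trace_parse_run_command() below feeds it one line at a time from a
 * user write, stripping '#' comments first.
 */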
8387 #define WRITE_BUFSIZE 4096
8389 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8390 size_t count, loff_t *ppos,
8391 int (*createfn)(int, char **))
8393 char *kbuf, *buf, *tmp;
8398 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8402 while (done < count) {
8403 size = count - done;
8405 if (size >= WRITE_BUFSIZE)
8406 size = WRITE_BUFSIZE - 1;
8408 if (copy_from_user(kbuf, buffer + done, size)) {
8415 tmp = strchr(buf, '\n');
8418 size = tmp - buf + 1;
8421 if (done + size < count) {
8424 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8425 pr_warn("Line length is too long: Should be less than %d\n",
8433 /* Remove comments */
8434 tmp = strchr(buf, '#');
8439 ret = trace_run_command(buf, createfn);
8444 } while (done < count);
8454 __init static int tracer_alloc_buffers(void)
8460 * Make sure we don't accidentally add more trace options
8461 * than we have bits for.
8463 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8465 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8468 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8469 goto out_free_buffer_mask;
8471 /* Only allocate trace_printk buffers if a trace_printk exists */
8472 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8473 /* Must be called before global_trace.buffer is allocated */
8474 trace_printk_init_buffers();
8476 /* To save memory, keep the ring buffer size to its minimum */
8477 if (ring_buffer_expanded)
8478 ring_buf_size = trace_buf_size;
8482 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8483 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8485 raw_spin_lock_init(&global_trace.start_lock);
8488 * The prepare callback allocates some memory for the ring buffer. We
8489 * don't free the buffer if the CPU goes down. If we were to free
8490 * the buffer, then the user would lose any trace that was in the
8491 * buffer. The memory will be removed once the "instance" is removed.
8493 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8494 "trace/RB:preapre", trace_rb_cpu_prepare,
8497 goto out_free_cpumask;
8498 /* Used for event triggers */
8500 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8502 goto out_rm_hp_state;
8504 if (trace_create_savedcmd() < 0)
8505 goto out_free_temp_buffer;
8507 /* TODO: make the number of buffers hot pluggable with CPUs */
8508 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8509 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8511 goto out_free_savedcmd;
8514 if (global_trace.buffer_disabled)
8517 if (trace_boot_clock) {
8518 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8520 pr_warn("Trace clock %s not defined, going back to default\n",
8525 * register_tracer() might reference current_trace, so it
8526 * needs to be set before we register anything. This is
8527 * just a bootstrap of current_trace anyway.
8529 global_trace.current_trace = &nop_trace;
8531 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8533 ftrace_init_global_array_ops(&global_trace);
8535 init_trace_flags_index(&global_trace);
8537 register_tracer(&nop_trace);
8539 /* Function tracing may start here (via kernel command line) */
8540 init_function_trace();
8542 /* All seems OK, enable tracing */
8543 tracing_disabled = 0;
8545 atomic_notifier_chain_register(&panic_notifier_list,
8546 &trace_panic_notifier);
8548 register_die_notifier(&trace_die_notifier);
8550 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8552 INIT_LIST_HEAD(&global_trace.systems);
8553 INIT_LIST_HEAD(&global_trace.events);
8554 INIT_LIST_HEAD(&global_trace.hist_vars);
8555 list_add(&global_trace.list, &ftrace_trace_arrays);
8557 apply_trace_boot_options();
8559 register_snapshot_cmd();
8564 free_saved_cmdlines_buffer(savedcmd);
8565 out_free_temp_buffer:
8566 ring_buffer_free(temp_buffer);
8568 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8570 free_cpumask_var(global_trace.tracing_cpumask);
8571 out_free_buffer_mask:
8572 free_cpumask_var(tracing_buffer_mask);
8577 void __init early_trace_init(void)
8579 if (tracepoint_printk) {
8580 tracepoint_print_iter =
8581 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8582 if (WARN_ON(!tracepoint_print_iter))
8583 tracepoint_printk = 0;
8585 static_key_enable(&tracepoint_printk_key.key);
8587 tracer_alloc_buffers();
8590 void __init trace_init(void)
8595 __init static int clear_boot_tracer(void)
8598 * The default bootup tracer string lives in an init section.
8599 * This function is called at late_initcall time. If we did not
8600 * find the boot tracer, then clear it out, to prevent a
8601 * later registration from accessing the buffer that is
8602 * about to be freed.
8604 if (!default_bootup_tracer)
8607 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8608 default_bootup_tracer);
8609 default_bootup_tracer = NULL;
8614 fs_initcall(tracer_init_tracefs);
8615 late_initcall_sync(clear_boot_tracer);
8617 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8618 __init static int tracing_set_default_clock(void)
8620 /* sched_clock_stable() is determined in late_initcall */
8621 if (!trace_boot_clock && !sched_clock_stable()) {
8623 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8624 "If you want to keep using the local clock, then add:\n"
8625 " \"trace_clock=local\"\n"
8626 "on the kernel command line\n");
8627 tracing_set_clock(&global_trace, "global");
8632 late_initcall_sync(tracing_set_default_clock);