1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will lurk into the ring-buffer to count the
64 * entries inserted during the selftest although some concurrent
65 * insertions into the ring-buffer such as trace_printk could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If a tracer is running, we do not want to run SELFTEST.
73 bool __read_mostly tracing_selftest_disabled;
75 /* Pipe tracepoints to printk */
76 struct trace_iterator *tracepoint_print_iter;
77 int tracepoint_printk;
78 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
80 /* For tracers that don't implement custom flags */
81 static struct tracer_opt dummy_tracer_opt[] = {
86 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
92 * To prevent the comm cache from being overwritten when no
93 * tracing is active, only save the comm when a trace event
96 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
99 * Kill all tracing for good (never come back).
100 * It is initialized to 1 but will turn to zero if the initialization
101 * of the tracer is successful. But that is the only place that sets
104 static int tracing_disabled = 1;
106 cpumask_var_t __read_mostly tracing_buffer_mask;
109 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
111 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112 * is set, then ftrace_dump is called. This will output the contents
113 * of the ftrace buffers to the console. This is very useful for
114 * capturing traces that lead to crashes and outputting them to a
117 * It is off by default, but you can enable it by either specifying
118 * "ftrace_dump_on_oops" in the kernel command line, or setting
119 * /proc/sys/kernel/ftrace_dump_on_oops
120 * Set 1 if you want to dump the buffers of all CPUs
121 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
124 enum ftrace_dump_mode ftrace_dump_on_oops;
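/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * an oops/panic path consumes the knob above; a driver's fatal-error
 * handler could call ftrace_dump() the same way. The function name is an
 * assumption made for this example.
 */
static void __maybe_unused example_dump_trace_on_fatal_error(void)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
}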
126 /* When set, tracing will stop when a WARN*() is hit */
127 int __disable_trace_on_warning;
129 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
130 /* Map of enums to their values, for "eval_map" file */
131 struct trace_eval_map_head {
133 unsigned long length;
136 union trace_eval_map_item;
138 struct trace_eval_map_tail {
140 * "end" is first and points to NULL as it must be different
141 * from "mod" or "eval_string"
143 union trace_eval_map_item *next;
144 const char *end; /* points to NULL */
147 static DEFINE_MUTEX(trace_eval_mutex);
150 * The trace_eval_maps are saved in an array with two extra elements,
151 * one at the beginning, and one at the end. The beginning item contains
152 * the count of the saved maps (head.length), and the module they
153 * belong to if not built in (head.mod). The ending item contains a
154 * pointer to the next array of saved eval_map items.
156 union trace_eval_map_item {
157 struct trace_eval_map map;
158 struct trace_eval_map_head head;
159 struct trace_eval_map_tail tail;
162 static union trace_eval_map_item *trace_eval_maps;
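/*
 * Illustrative sketch (not part of the original file): walking the saved
 * eval maps as laid out above. Element 0 is the head, the next
 * head.length elements are the maps themselves, and the element after
 * those is the tail, whose tail.next points to the following array (or
 * NULL). The function name is an assumption made for this example.
 */
static void __maybe_unused example_walk_eval_maps(void)
{
	union trace_eval_map_item *ptr = trace_eval_maps;

	while (ptr) {
		unsigned long i, len = ptr->head.length;

		for (i = 1; i <= len; i++)
			pr_debug("%s: %s = %lu\n", ptr[i].map.system,
				 ptr[i].map.eval_string,
				 ptr[i].map.eval_value);

		ptr = ptr[len + 1].tail.next;
	}
}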
163 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
165 int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 static void ftrace_trace_userstack(struct trace_buffer *buffer,
167 unsigned long flags, int pc);
169 #define MAX_TRACER_SIZE 100
170 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
171 static char *default_bootup_tracer;
173 static bool allocate_snapshot;
175 static int __init set_cmdline_ftrace(char *str)
177 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
178 default_bootup_tracer = bootup_tracer_buf;
179 /* We are using ftrace early, expand it */
180 ring_buffer_expanded = true;
183 __setup("ftrace=", set_cmdline_ftrace);
185 static int __init set_ftrace_dump_on_oops(char *str)
187 if (*str++ != '=' || !*str) {
188 ftrace_dump_on_oops = DUMP_ALL;
192 if (!strcmp("orig_cpu", str)) {
193 ftrace_dump_on_oops = DUMP_ORIG;
199 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
201 static int __init stop_trace_on_warning(char *str)
203 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
204 __disable_trace_on_warning = 1;
207 __setup("traceoff_on_warning", stop_trace_on_warning);
209 static int __init boot_alloc_snapshot(char *str)
211 allocate_snapshot = true;
212 /* We also need the main ring buffer expanded */
213 ring_buffer_expanded = true;
216 __setup("alloc_snapshot", boot_alloc_snapshot);
219 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
221 static int __init set_trace_boot_options(char *str)
223 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
226 __setup("trace_options=", set_trace_boot_options);
228 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
229 static char *trace_boot_clock __initdata;
231 static int __init set_trace_boot_clock(char *str)
233 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
234 trace_boot_clock = trace_boot_clock_buf;
237 __setup("trace_clock=", set_trace_boot_clock);
239 static int __init set_tracepoint_printk(char *str)
241 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
242 tracepoint_printk = 1;
245 __setup("tp_printk", set_tracepoint_printk);
247 unsigned long long ns2usecs(u64 nsec)
254 /* trace_flags holds trace_options default values */
255 #define TRACE_DEFAULT_FLAGS \
256 (FUNCTION_DEFAULT_FLAGS | \
257 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
258 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
259 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
260 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
262 /* trace_options that are only supported by global_trace */
263 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
264 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
266 /* trace_flags that are default zero for instances */
267 #define ZEROED_TRACE_FLAGS \
268 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
271 * The global_trace is the descriptor that holds the top-level tracing
272 * buffers for the live tracing.
274 static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
278 LIST_HEAD(ftrace_trace_arrays);
280 int trace_array_get(struct trace_array *this_tr)
282 struct trace_array *tr;
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
293 mutex_unlock(&trace_types_lock);
298 static void __trace_array_put(struct trace_array *this_tr)
300 WARN_ON(!this_tr->ref);
305 * trace_array_put - Decrement the reference counter for this trace array.
307 * NOTE: Use this when we no longer need the trace array returned by
308 * trace_array_get_by_name(). This ensures the trace array can be later
312 void trace_array_put(struct trace_array *this_tr)
317 mutex_lock(&trace_types_lock);
318 __trace_array_put(this_tr);
319 mutex_unlock(&trace_types_lock);
321 EXPORT_SYMBOL_GPL(trace_array_put);
323 int tracing_check_open_get_tr(struct trace_array *tr)
327 ret = security_locked_down(LOCKDOWN_TRACEFS);
331 if (tracing_disabled)
334 if (tr && trace_array_get(tr) < 0)
340 int call_filter_check_discard(struct trace_event_call *call, void *rec,
341 struct trace_buffer *buffer,
342 struct ring_buffer_event *event)
344 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
345 !filter_match_preds(call->filter, rec)) {
346 __trace_event_discard_commit(buffer, event);
353 void trace_free_pid_list(struct trace_pid_list *pid_list)
355 vfree(pid_list->pids);
360 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
361 * @filtered_pids: The list of pids to check
362 * @search_pid: The PID to find in @filtered_pids
364 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
367 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
370 * If pid_max changed after filtered_pids was created, we
371 * by default ignore all pids greater than the previous pid_max.
373 if (search_pid >= filtered_pids->pid_max)
376 return test_bit(search_pid, filtered_pids->pids);
380 * trace_ignore_this_task - should a task be ignored for tracing
381 * @filtered_pids: The list of pids to check
382 * @task: The task that should be ignored if not filtered
384 * Checks if @task should be traced or not from @filtered_pids.
385 * Returns true if @task should *NOT* be traced.
386 * Returns false if @task should be traced.
389 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
392 * Return false, because if filtered_pids does not exist,
393 * all pids are good to trace.
398 return !trace_find_filtered_pid(filtered_pids, task->pid);
402 * trace_filter_add_remove_task - Add or remove a task from a pid_list
403 * @pid_list: The list to modify
404 * @self: The current task for fork or NULL for exit
405 * @task: The task to add or remove
407 * If adding a task, if @self is defined, the task is only added if @self
408 * is also included in @pid_list. This happens on fork and tasks should
409 * only be added when the parent is listed. If @self is NULL, then the
410 * @task pid will be removed from the list, which would happen on exit
413 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
414 struct task_struct *self,
415 struct task_struct *task)
420 /* For forks, we only add if the forking task is listed */
422 if (!trace_find_filtered_pid(pid_list, self->pid))
426 /* Sorry, but we don't support pid_max changing after setting */
427 if (task->pid >= pid_list->pid_max)
430 /* "self" is set for forks, and NULL for exits */
432 set_bit(task->pid, pid_list->pids);
434 clear_bit(task->pid, pid_list->pids);
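/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * the sched_process_fork/exit hooks of the event pid filter keep their
 * list in sync using the helper above. The function name and the use of
 * tr->filtered_pids are assumptions made for this example.
 */
static void __maybe_unused
example_filter_pid_fork(struct trace_array *tr, struct task_struct *self,
			struct task_struct *task)
{
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}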
438 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
439 * @pid_list: The pid list to show
440 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
441 * @pos: The position of the file
443 * This is used by the seq_file "next" operation to iterate the pids
444 * listed in a trace_pid_list structure.
446 * Returns the pid+1 as we want to display pid of zero, but NULL would
447 * stop the iteration.
449 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
451 unsigned long pid = (unsigned long)v;
455 /* pid already is +1 of the actual previous bit */
456 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
458 /* Return pid + 1 to allow zero to be represented */
459 if (pid < pid_list->pid_max)
460 return (void *)(pid + 1);
466 * trace_pid_start - Used for seq_file to start reading pid lists
467 * @pid_list: The pid list to show
468 * @pos: The position of the file
470 * This is used by seq_file "start" operation to start the iteration
473 * Returns the pid+1 as we want to display pid of zero, but NULL would
474 * stop the iteration.
476 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
481 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
482 if (pid >= pid_list->pid_max)
485 /* Return pid + 1 so that zero can be the exit value */
486 for (pid++; pid && l < *pos;
487 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
493 * trace_pid_show - show the current pid in seq_file processing
494 * @m: The seq_file structure to write into
495 * @v: A void pointer of the pid (+1) value to display
497 * Can be directly used by seq_file operations to display the current
500 int trace_pid_show(struct seq_file *m, void *v)
502 unsigned long pid = (unsigned long)v - 1;
504 seq_printf(m, "%lu\n", pid);
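/*
 * Illustrative sketch (not part of the original file): wiring the three
 * helpers above into seq_file operations. "example_pid_list" and the
 * no-op stop callback are assumptions made for this example; real users
 * pass the pid list they are showing and drop any locks in .stop.
 */
static struct trace_pid_list *example_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops __maybe_unused = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};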
508 /* 128 should be much more than enough */
509 #define PID_BUF_SIZE 127
511 int trace_pid_write(struct trace_pid_list *filtered_pids,
512 struct trace_pid_list **new_pid_list,
513 const char __user *ubuf, size_t cnt)
515 struct trace_pid_list *pid_list;
516 struct trace_parser parser;
524 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
528 * Always recreate a new array. The write is an all-or-nothing
529 * operation: a new array is always created when the user adds
530 * pids. If the operation fails, then the current list is
533 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
535 trace_parser_put(&parser);
539 pid_list->pid_max = READ_ONCE(pid_max);
541 /* Only truncating will shrink pid_max */
542 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
543 pid_list->pid_max = filtered_pids->pid_max;
545 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
546 if (!pid_list->pids) {
547 trace_parser_put(&parser);
553 /* copy the current bits to the new max */
554 for_each_set_bit(pid, filtered_pids->pids,
555 filtered_pids->pid_max) {
556 set_bit(pid, pid_list->pids);
565 ret = trace_get_user(&parser, ubuf, cnt, &pos);
566 if (ret < 0 || !trace_parser_loaded(&parser))
574 if (kstrtoul(parser.buffer, 0, &val))
576 if (val >= pid_list->pid_max)
581 set_bit(pid, pid_list->pids);
584 trace_parser_clear(&parser);
587 trace_parser_put(&parser);
590 trace_free_pid_list(pid_list);
595 /* Cleared the list of pids */
596 trace_free_pid_list(pid_list);
601 *new_pid_list = pid_list;
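/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for a write() handler that feeds user input to trace_pid_write() and
 * publishes the new list with RCU. The function name and the use of
 * tr->filtered_pids and event_mutex are assumptions made for this example.
 */
static ssize_t __maybe_unused
example_set_pids_write(struct trace_array *tr, const char __user *ubuf,
		       size_t cnt)
{
	struct trace_pid_list *filtered_pids, *pid_list;
	ssize_t ret;

	mutex_lock(&event_mutex);

	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
						  lockdep_is_held(&event_mutex));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	if (filtered_pids) {
		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	}
out:
	mutex_unlock(&event_mutex);
	return ret;
}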
606 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
610 /* Early boot up does not have a buffer yet */
612 return trace_clock_local();
614 ts = ring_buffer_time_stamp(buf->buffer, cpu);
615 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
620 u64 ftrace_now(int cpu)
622 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
626 * tracing_is_enabled - Show if global_trace has been disabled
628 * Shows if the global trace has been enabled or not. It uses the
629 * mirror flag "buffer_disabled" so that it can be used in fast paths
630 * such as the irqsoff tracer. But it may be inaccurate due to races. If
631 * you need to know the accurate state, use tracing_is_on(), which is a
632 * little slower but accurate.
634 int tracing_is_enabled(void)
637 * For quick access (irqsoff uses this in fast path), just
638 * return the mirror variable of the state of the ring buffer.
639 * It's a little racy, but we don't really care.
642 return !global_trace.buffer_disabled;
646 * trace_buf_size is the size in bytes that is allocated
647 * for a buffer. Note, the number of bytes is always rounded
650 * This number is purposely set to a low number of 16384.
651 * If the dump on oops happens, it is much appreciated not to
652 * have to wait for all that output. Anyway, this is configurable
653 * at both boot time and run time.
655 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
657 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
659 /* trace_types holds a linked list of available tracers. */
660 static struct tracer *trace_types __read_mostly;
663 * trace_types_lock is used to protect the trace_types list.
665 DEFINE_MUTEX(trace_types_lock);
668 * serialize the access of the ring buffer
670 * The ring buffer serializes readers, but that is only low level protection.
671 * The validity of the events (which are returned by ring_buffer_peek() etc.)
672 * is not protected by the ring buffer.
674 * The content of events may become garbage if we allow another process to
675 * consume these events concurrently:
676 * A) the page of the consumed events may become a normal page
677 * (not a reader page) in the ring buffer, and this page will be rewritten
678 * by the events producer.
679 * B) The page of the consumed events may become a page for splice_read,
680 * and this page will be returned to the system.
682 * These primitives allow multi-process access to different cpu ring buffers
685 * These primitives don't distinguish read-only and read-consume access.
686 * Multiple read-only accesses are also serialized.
690 static DECLARE_RWSEM(all_cpu_access_lock);
691 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
693 static inline void trace_access_lock(int cpu)
695 if (cpu == RING_BUFFER_ALL_CPUS) {
696 /* gain it for accessing the whole ring buffer. */
697 down_write(&all_cpu_access_lock);
699 /* gain it for accessing a cpu ring buffer. */
701 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
702 down_read(&all_cpu_access_lock);
704 /* Secondly block other access to this @cpu ring buffer. */
705 mutex_lock(&per_cpu(cpu_access_lock, cpu));
709 static inline void trace_access_unlock(int cpu)
711 if (cpu == RING_BUFFER_ALL_CPUS) {
712 up_write(&all_cpu_access_lock);
714 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
715 up_read(&all_cpu_access_lock);
719 static inline void trace_access_lock_init(void)
723 for_each_possible_cpu(cpu)
724 mutex_init(&per_cpu(cpu_access_lock, cpu));
729 static DEFINE_MUTEX(access_lock);
731 static inline void trace_access_lock(int cpu)
734 mutex_lock(&access_lock);
737 static inline void trace_access_unlock(int cpu)
740 mutex_unlock(&access_lock);
743 static inline void trace_access_lock_init(void)
749 #ifdef CONFIG_STACKTRACE
750 static void __ftrace_trace_stack(struct trace_buffer *buffer,
752 int skip, int pc, struct pt_regs *regs);
753 static inline void ftrace_trace_stack(struct trace_array *tr,
754 struct trace_buffer *buffer,
756 int skip, int pc, struct pt_regs *regs);
759 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
761 int skip, int pc, struct pt_regs *regs)
764 static inline void ftrace_trace_stack(struct trace_array *tr,
765 struct trace_buffer *buffer,
767 int skip, int pc, struct pt_regs *regs)
773 static __always_inline void
774 trace_event_setup(struct ring_buffer_event *event,
775 int type, unsigned long flags, int pc)
777 struct trace_entry *ent = ring_buffer_event_data(event);
779 tracing_generic_entry_update(ent, type, flags, pc);
782 static __always_inline struct ring_buffer_event *
783 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
786 unsigned long flags, int pc)
788 struct ring_buffer_event *event;
790 event = ring_buffer_lock_reserve(buffer, len);
792 trace_event_setup(event, type, flags, pc);
797 void tracer_tracing_on(struct trace_array *tr)
799 if (tr->array_buffer.buffer)
800 ring_buffer_record_on(tr->array_buffer.buffer);
802 * This flag is looked at when buffers haven't been allocated
803 * yet, or by some tracers (like irqsoff), that just want to
804 * know if the ring buffer has been disabled, but it can handle
805 * races where it gets disabled while we still do a record.
806 * As the check is in the fast path of the tracers, it is more
807 * important to be fast than accurate.
809 tr->buffer_disabled = 0;
810 /* Make the flag seen by readers */
815 * tracing_on - enable tracing buffers
817 * This function enables tracing buffers that may have been
818 * disabled with tracing_off.
820 void tracing_on(void)
822 tracer_tracing_on(&global_trace);
824 EXPORT_SYMBOL_GPL(tracing_on);
827 static __always_inline void
828 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
830 __this_cpu_write(trace_taskinfo_save, true);
832 /* If this is the temp buffer, we need to commit fully */
833 if (this_cpu_read(trace_buffered_event) == event) {
834 /* Length is in event->array[0] */
835 ring_buffer_write(buffer, event->array[0], &event->array[1]);
836 /* Release the temp buffer */
837 this_cpu_dec(trace_buffered_event_cnt);
839 ring_buffer_unlock_commit(buffer, event);
843 * __trace_puts - write a constant string into the trace buffer.
844 * @ip: The address of the caller
845 * @str: The constant string to write
846 * @size: The size of the string.
848 int __trace_puts(unsigned long ip, const char *str, int size)
850 struct ring_buffer_event *event;
851 struct trace_buffer *buffer;
852 struct print_entry *entry;
853 unsigned long irq_flags;
857 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
860 pc = preempt_count();
862 if (unlikely(tracing_selftest_running || tracing_disabled))
865 alloc = sizeof(*entry) + size + 2; /* possible \n added */
867 local_save_flags(irq_flags);
868 buffer = global_trace.array_buffer.buffer;
869 ring_buffer_nest_start(buffer);
870 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
877 entry = ring_buffer_event_data(event);
880 memcpy(&entry->buf, str, size);
882 /* Add a newline if necessary */
883 if (entry->buf[size - 1] != '\n') {
884 entry->buf[size] = '\n';
885 entry->buf[size + 1] = '\0';
887 entry->buf[size] = '\0';
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
892 ring_buffer_nest_end(buffer);
895 EXPORT_SYMBOL_GPL(__trace_puts);
898 * __trace_bputs - write the pointer to a constant string into trace buffer
899 * @ip: The address of the caller
900 * @str: The constant string to write to the buffer to
902 int __trace_bputs(unsigned long ip, const char *str)
904 struct ring_buffer_event *event;
905 struct trace_buffer *buffer;
906 struct bputs_entry *entry;
907 unsigned long irq_flags;
908 int size = sizeof(struct bputs_entry);
912 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
915 pc = preempt_count();
917 if (unlikely(tracing_selftest_running || tracing_disabled))
920 local_save_flags(irq_flags);
921 buffer = global_trace.array_buffer.buffer;
923 ring_buffer_nest_start(buffer);
924 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
929 entry = ring_buffer_event_data(event);
933 __buffer_unlock_commit(buffer, event);
934 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
938 ring_buffer_nest_end(buffer);
941 EXPORT_SYMBOL_GPL(__trace_bputs);
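/*
 * Illustrative sketch (not part of the original file): callers normally go
 * through the trace_puts() macro from <linux/kernel.h>, which expands to
 * __trace_puts() or __trace_bputs() depending on whether the string is a
 * built-in constant. The function name is an assumption made for this example.
 */
static void __maybe_unused example_mark_trace(void)
{
	trace_puts("example: reached the code path of interest\n");
}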
943 #ifdef CONFIG_TRACER_SNAPSHOT
944 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
946 struct tracer *tracer = tr->current_trace;
950 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
951 internal_trace_puts("*** snapshot is being ignored ***\n");
955 if (!tr->allocated_snapshot) {
956 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
957 internal_trace_puts("*** stopping trace here! ***\n");
962 /* Note, snapshot can not be used when the tracer uses it */
963 if (tracer->use_max_tr) {
964 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
965 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
969 local_irq_save(flags);
970 update_max_tr(tr, current, smp_processor_id(), cond_data);
971 local_irq_restore(flags);
974 void tracing_snapshot_instance(struct trace_array *tr)
976 tracing_snapshot_instance_cond(tr, NULL);
980 * tracing_snapshot - take a snapshot of the current buffer.
982 * This causes a swap between the snapshot buffer and the current live
983 * tracing buffer. You can use this to take snapshots of the live
984 * trace when some condition is triggered, but continue to trace.
986 * Note, make sure to allocate the snapshot with either
987 * a tracing_snapshot_alloc(), or by doing it manually
988 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
990 * If the snapshot buffer is not allocated, it will stop tracing.
991 * Basically making a permanent snapshot.
993 void tracing_snapshot(void)
995 struct trace_array *tr = &global_trace;
997 tracing_snapshot_instance(tr);
999 EXPORT_SYMBOL_GPL(tracing_snapshot);
1002 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1003 * @tr: The tracing instance to snapshot
1004 * @cond_data: The data to be tested conditionally, and possibly saved
1006 * This is the same as tracing_snapshot() except that the snapshot is
1007 * conditional - the snapshot will only happen if the
1008 * cond_snapshot.update() implementation receiving the cond_data
1009 * returns true, which means that the trace array's cond_snapshot
1010 * update() operation used the cond_data to determine whether the
1011 * snapshot should be taken, and if it was, presumably saved it along
1012 * with the snapshot.
1014 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1016 tracing_snapshot_instance_cond(tr, cond_data);
1018 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1021 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1022 * @tr: The tracing instance
1024 * When the user enables a conditional snapshot using
1025 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1026 * with the snapshot. This accessor is used to retrieve it.
1028 * Should not be called from cond_snapshot.update(), since it takes
1029 * the tr->max_lock lock, which the code calling
1030 * cond_snapshot.update() has already done.
1032 * Returns the cond_data associated with the trace array's snapshot.
1034 void *tracing_cond_snapshot_data(struct trace_array *tr)
1036 void *cond_data = NULL;
1038 arch_spin_lock(&tr->max_lock);
1040 if (tr->cond_snapshot)
1041 cond_data = tr->cond_snapshot->cond_data;
1043 arch_spin_unlock(&tr->max_lock);
1047 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1049 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1050 struct array_buffer *size_buf, int cpu_id);
1051 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1053 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1057 if (!tr->allocated_snapshot) {
1059 /* allocate spare buffer */
1060 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1061 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1065 tr->allocated_snapshot = true;
1071 static void free_snapshot(struct trace_array *tr)
1074 * We don't free the ring buffer; instead, we resize it because
1075 * the max_tr ring buffer has some state (e.g. ring->clock) and
1076 * we want to preserve it.
1078 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1079 set_buffer_entries(&tr->max_buffer, 1);
1080 tracing_reset_online_cpus(&tr->max_buffer);
1081 tr->allocated_snapshot = false;
1085 * tracing_alloc_snapshot - allocate snapshot buffer.
1087 * This only allocates the snapshot buffer if it isn't already
1088 * allocated - it doesn't also take a snapshot.
1090 * This is meant to be used in cases where the snapshot buffer needs
1091 * to be set up for events that can't sleep but need to be able to
1092 * trigger a snapshot.
1094 int tracing_alloc_snapshot(void)
1096 struct trace_array *tr = &global_trace;
1099 ret = tracing_alloc_snapshot_instance(tr);
1104 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1107 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1109 * This is similar to tracing_snapshot(), but it will allocate the
1110 * snapshot buffer if it isn't already allocated. Use this only
1111 * where it is safe to sleep, as the allocation may sleep.
1113 * This causes a swap between the snapshot buffer and the current live
1114 * tracing buffer. You can use this to take snapshots of the live
1115 * trace when some condition is triggered, but continue to trace.
1117 void tracing_snapshot_alloc(void)
1121 ret = tracing_alloc_snapshot();
1127 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
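/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * use is to allocate the snapshot buffer once from a sleepable setup path
 * and then trigger snapshots from fast paths when something interesting
 * happens. The function names are assumptions made for this example.
 */
static int __maybe_unused example_snapshot_setup(void)
{
	/* May sleep: do this from module init or a similar context */
	return tracing_alloc_snapshot();
}

static void __maybe_unused example_snapshot_on_condition(bool condition_hit)
{
	/* Safe from atomic context once the buffer has been allocated */
	if (condition_hit)
		tracing_snapshot();
}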
1130 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1131 * @tr: The tracing instance
1132 * @cond_data: User data to associate with the snapshot
1133 * @update: Implementation of the cond_snapshot update function
1135 * Check whether the conditional snapshot for the given instance has
1136 * already been enabled, or if the current tracer is already using a
1137 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1138 * save the cond_data and update function inside.
1140 * Returns 0 if successful, error otherwise.
1142 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1143 cond_update_fn_t update)
1145 struct cond_snapshot *cond_snapshot;
1148 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1152 cond_snapshot->cond_data = cond_data;
1153 cond_snapshot->update = update;
1155 mutex_lock(&trace_types_lock);
1157 ret = tracing_alloc_snapshot_instance(tr);
1161 if (tr->current_trace->use_max_tr) {
1167 * The cond_snapshot can only change to NULL without the
1168 * trace_types_lock. We don't care if we race with it going
1169 * to NULL, but we want to make sure that it's not set to
1170 * something other than NULL when we get here, which we can
1171 * do safely with only holding the trace_types_lock and not
1172 * having to take the max_lock.
1174 if (tr->cond_snapshot) {
1179 arch_spin_lock(&tr->max_lock);
1180 tr->cond_snapshot = cond_snapshot;
1181 arch_spin_unlock(&tr->max_lock);
1183 mutex_unlock(&trace_types_lock);
1188 mutex_unlock(&trace_types_lock);
1189 kfree(cond_snapshot);
1192 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1195 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1196 * @tr: The tracing instance
1198 * Check whether the conditional snapshot for the given instance is
1199 * enabled; if so, free the cond_snapshot associated with it,
1200 * otherwise return -EINVAL.
1202 * Returns 0 if successful, error otherwise.
1204 int tracing_snapshot_cond_disable(struct trace_array *tr)
1208 arch_spin_lock(&tr->max_lock);
1210 if (!tr->cond_snapshot)
1213 kfree(tr->cond_snapshot);
1214 tr->cond_snapshot = NULL;
1217 arch_spin_unlock(&tr->max_lock);
1221 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
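/*
 * Illustrative sketch (not part of the original file): registering a
 * conditional snapshot. The update callback decides, based on cond_data,
 * whether the snapshot should really be taken when tracing_snapshot_cond()
 * fires. The names and the threshold logic are assumptions made for this
 * example.
 */
static bool example_snapshot_update(struct trace_array *tr, void *cond_data)
{
	unsigned long *threshold = cond_data;

	/* Only swap the buffers when the observed value exceeds the limit */
	return *threshold > 100;
}

static int __maybe_unused
example_enable_cond_snapshot(struct trace_array *tr, unsigned long *threshold)
{
	int ret;

	ret = tracing_snapshot_cond_enable(tr, threshold, example_snapshot_update);
	if (ret)
		return ret;

	/* Later, hot paths call tracing_snapshot_cond(tr, threshold) */
	return 0;
}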
1223 void tracing_snapshot(void)
1225 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1227 EXPORT_SYMBOL_GPL(tracing_snapshot);
1228 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1230 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1232 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1233 int tracing_alloc_snapshot(void)
1235 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1238 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1239 void tracing_snapshot_alloc(void)
1244 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1245 void *tracing_cond_snapshot_data(struct trace_array *tr)
1249 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1250 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1254 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1255 int tracing_snapshot_cond_disable(struct trace_array *tr)
1259 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1260 #endif /* CONFIG_TRACER_SNAPSHOT */
1262 void tracer_tracing_off(struct trace_array *tr)
1264 if (tr->array_buffer.buffer)
1265 ring_buffer_record_off(tr->array_buffer.buffer);
1267 * This flag is looked at when buffers haven't been allocated
1268 * yet, or by some tracers (like irqsoff), that just want to
1269 * know if the ring buffer has been disabled, but it can handle
1270 * races where it gets disabled while we still do a record.
1271 * As the check is in the fast path of the tracers, it is more
1272 * important to be fast than accurate.
1274 tr->buffer_disabled = 1;
1275 /* Make the flag seen by readers */
1280 * tracing_off - turn off tracing buffers
1282 * This function stops the tracing buffers from recording data.
1283 * It does not disable any overhead the tracers themselves may
1284 * be causing. This function simply causes all recording to
1285 * the ring buffers to fail.
1287 void tracing_off(void)
1289 tracer_tracing_off(&global_trace);
1291 EXPORT_SYMBOL_GPL(tracing_off);
1293 void disable_trace_on_warning(void)
1295 if (__disable_trace_on_warning)
1300 * tracer_tracing_is_on - show real state of ring buffer enabled
1301 * @tr : the trace array to know if ring buffer is enabled
1303 * Shows real state of the ring buffer if it is enabled or not.
1305 bool tracer_tracing_is_on(struct trace_array *tr)
1307 if (tr->array_buffer.buffer)
1308 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1309 return !tr->buffer_disabled;
1313 * tracing_is_on - show state of ring buffers enabled
1315 int tracing_is_on(void)
1317 return tracer_tracing_is_on(&global_trace);
1319 EXPORT_SYMBOL_GPL(tracing_is_on);
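/*
 * Illustrative sketch (not part of the original file): freezing the ring
 * buffer right after an error so the trace leading up to it is preserved.
 * The function name is an assumption made for this example.
 */
static void __maybe_unused example_freeze_trace_on_error(int err)
{
	if (err && tracing_is_on()) {
		trace_printk("example: error %d, stopping trace\n", err);
		tracing_off();
	}
}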
1321 static int __init set_buf_size(char *str)
1323 unsigned long buf_size;
1327 buf_size = memparse(str, &str);
1328 /* nr_entries can not be zero */
1331 trace_buf_size = buf_size;
1334 __setup("trace_buf_size=", set_buf_size);
1336 static int __init set_tracing_thresh(char *str)
1338 unsigned long threshold;
1343 ret = kstrtoul(str, 0, &threshold);
1346 tracing_thresh = threshold * 1000;
1349 __setup("tracing_thresh=", set_tracing_thresh);
1351 unsigned long nsecs_to_usecs(unsigned long nsecs)
1353 return nsecs / 1000;
1357 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1358 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1359 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1360 * of strings in the order that the evals (enum) were defined.
1365 /* These must match the bit positions in trace_iterator_flags */
1366 static const char *trace_options[] = {
1374 int in_ns; /* is this clock in nanoseconds? */
1375 } trace_clocks[] = {
1376 { trace_clock_local, "local", 1 },
1377 { trace_clock_global, "global", 1 },
1378 { trace_clock_counter, "counter", 0 },
1379 { trace_clock_jiffies, "uptime", 0 },
1380 { trace_clock, "perf", 1 },
1381 { ktime_get_mono_fast_ns, "mono", 1 },
1382 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1383 { ktime_get_boot_fast_ns, "boot", 1 },
1387 bool trace_clock_in_ns(struct trace_array *tr)
1389 if (trace_clocks[tr->clock_id].in_ns)
1396 * trace_parser_get_init - gets the buffer for trace parser
1398 int trace_parser_get_init(struct trace_parser *parser, int size)
1400 memset(parser, 0, sizeof(*parser));
1402 parser->buffer = kmalloc(size, GFP_KERNEL);
1403 if (!parser->buffer)
1406 parser->size = size;
1411 * trace_parser_put - frees the buffer for trace parser
1413 void trace_parser_put(struct trace_parser *parser)
1415 kfree(parser->buffer);
1416 parser->buffer = NULL;
1420 * trace_get_user - reads the user input string separated by space
1421 * (matched by isspace(ch))
1423 * For each string found the 'struct trace_parser' is updated,
1424 * and the function returns.
1426 * Returns number of bytes read.
1428 * See kernel/trace/trace.h for 'struct trace_parser' details.
1430 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1431 size_t cnt, loff_t *ppos)
1438 trace_parser_clear(parser);
1440 ret = get_user(ch, ubuf++);
1448 * The parser is not finished with the last write,
1449 * continue reading the user input without skipping spaces.
1451 if (!parser->cont) {
1452 /* skip white space */
1453 while (cnt && isspace(ch)) {
1454 ret = get_user(ch, ubuf++);
1463 /* only spaces were written */
1464 if (isspace(ch) || !ch) {
1471 /* read the non-space input */
1472 while (cnt && !isspace(ch) && ch) {
1473 if (parser->idx < parser->size - 1)
1474 parser->buffer[parser->idx++] = ch;
1479 ret = get_user(ch, ubuf++);
1486 /* We either got finished input or we have to wait for another call. */
1487 if (isspace(ch) || !ch) {
1488 parser->buffer[parser->idx] = 0;
1489 parser->cont = false;
1490 } else if (parser->idx < parser->size - 1) {
1491 parser->cont = true;
1492 parser->buffer[parser->idx++] = ch;
1493 /* Make sure the parsed string always terminates with '\0'. */
1494 parser->buffer[parser->idx] = 0;
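/*
 * Illustrative sketch (not part of the original file): the usual shape of
 * a write() handler that uses the parser above to pull space-separated
 * tokens out of a user buffer, one per trace_get_user() call. The function
 * name and the per-token pr_debug() are assumptions made for this example.
 */
static ssize_t __maybe_unused
example_tokenized_write(const char __user *ubuf, size_t cnt)
{
	struct trace_parser parser;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		/* parser.buffer now holds one NUL-terminated token */
		pr_debug("example token: %s\n", parser.buffer);

		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);

	return ret < 0 ? ret : read;
}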
1507 /* TODO add a seq_buf_to_buffer() */
1508 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1512 if (trace_seq_used(s) <= s->seq.readpos)
1515 len = trace_seq_used(s) - s->seq.readpos;
1518 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1520 s->seq.readpos += cnt;
1524 unsigned long __read_mostly tracing_thresh;
1525 static const struct file_operations tracing_max_lat_fops;
1527 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1528 defined(CONFIG_FSNOTIFY)
1530 static struct workqueue_struct *fsnotify_wq;
1532 static void latency_fsnotify_workfn(struct work_struct *work)
1534 struct trace_array *tr = container_of(work, struct trace_array,
1536 fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
1537 tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
1540 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1542 struct trace_array *tr = container_of(iwork, struct trace_array,
1544 queue_work(fsnotify_wq, &tr->fsnotify_work);
1547 static void trace_create_maxlat_file(struct trace_array *tr,
1548 struct dentry *d_tracer)
1550 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1551 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1552 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1553 d_tracer, &tr->max_latency,
1554 &tracing_max_lat_fops);
1557 __init static int latency_fsnotify_init(void)
1559 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1560 WQ_UNBOUND | WQ_HIGHPRI, 0);
1562 pr_err("Unable to allocate tr_max_lat_wq\n");
1568 late_initcall_sync(latency_fsnotify_init);
1570 void latency_fsnotify(struct trace_array *tr)
1575 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1576 * possible that we are called from __schedule() or do_idle(), which
1577 * could cause a deadlock.
1579 irq_work_queue(&tr->fsnotify_irqwork);
1583 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1584 * defined(CONFIG_FSNOTIFY)
1588 #define trace_create_maxlat_file(tr, d_tracer) \
1589 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1590 &tr->max_latency, &tracing_max_lat_fops)
1594 #ifdef CONFIG_TRACER_MAX_TRACE
1596 * Copy the new maximum trace into the separate maximum-trace
1597 * structure. (this way the maximum trace is permanently saved,
1598 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1601 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1603 struct array_buffer *trace_buf = &tr->array_buffer;
1604 struct array_buffer *max_buf = &tr->max_buffer;
1605 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1606 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1609 max_buf->time_start = data->preempt_timestamp;
1611 max_data->saved_latency = tr->max_latency;
1612 max_data->critical_start = data->critical_start;
1613 max_data->critical_end = data->critical_end;
1615 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1616 max_data->pid = tsk->pid;
1618 * If tsk == current, then use current_uid(), as that does not use
1619 * RCU. The irq tracer can be called out of RCU scope.
1622 max_data->uid = current_uid();
1624 max_data->uid = task_uid(tsk);
1626 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1627 max_data->policy = tsk->policy;
1628 max_data->rt_priority = tsk->rt_priority;
1630 /* record this task's comm */
1631 tracing_record_cmdline(tsk);
1632 latency_fsnotify(tr);
1636 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1638 * @tsk: the task with the latency
1639 * @cpu: The cpu that initiated the trace.
1640 * @cond_data: User data associated with a conditional snapshot
1642 * Flip the buffers between the @tr and the max_tr and record information
1643 * about which task was the cause of this latency.
1646 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1652 WARN_ON_ONCE(!irqs_disabled());
1654 if (!tr->allocated_snapshot) {
1655 /* Only the nop tracer should hit this when disabling */
1656 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1660 arch_spin_lock(&tr->max_lock);
1662 /* Inherit the recordable setting from array_buffer */
1663 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1664 ring_buffer_record_on(tr->max_buffer.buffer);
1666 ring_buffer_record_off(tr->max_buffer.buffer);
1668 #ifdef CONFIG_TRACER_SNAPSHOT
1669 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1672 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1674 __update_max_tr(tr, tsk, cpu);
1677 arch_spin_unlock(&tr->max_lock);
1681 * update_max_tr_single - only copy one trace over, and reset the rest
1683 * @tsk: task with the latency
1684 * @cpu: the cpu of the buffer to copy.
1686 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1689 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1696 WARN_ON_ONCE(!irqs_disabled());
1697 if (!tr->allocated_snapshot) {
1698 /* Only the nop tracer should hit this when disabling */
1699 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1703 arch_spin_lock(&tr->max_lock);
1705 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1707 if (ret == -EBUSY) {
1709 * We failed to swap the buffer due to a commit taking
1710 * place on this CPU. We fail to record, but we reset
1711 * the max trace buffer (no one writes directly to it)
1712 * and flag that it failed.
1714 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1715 "Failed to swap buffers due to commit in progress\n");
1718 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1720 __update_max_tr(tr, tsk, cpu);
1721 arch_spin_unlock(&tr->max_lock);
1723 #endif /* CONFIG_TRACER_MAX_TRACE */
1725 static int wait_on_pipe(struct trace_iterator *iter, int full)
1727 /* Iterators are static, they should be filled or empty */
1728 if (trace_buffer_iter(iter, iter->cpu_file))
1731 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1735 #ifdef CONFIG_FTRACE_STARTUP_TEST
1736 static bool selftests_can_run;
1738 struct trace_selftests {
1739 struct list_head list;
1740 struct tracer *type;
1743 static LIST_HEAD(postponed_selftests);
1745 static int save_selftest(struct tracer *type)
1747 struct trace_selftests *selftest;
1749 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1753 selftest->type = type;
1754 list_add(&selftest->list, &postponed_selftests);
1758 static int run_tracer_selftest(struct tracer *type)
1760 struct trace_array *tr = &global_trace;
1761 struct tracer *saved_tracer = tr->current_trace;
1764 if (!type->selftest || tracing_selftest_disabled)
1768 * If a tracer registers early in boot up (before scheduling is
1769 * initialized and such), then do not run its selftests yet.
1770 * Instead, run it a little later in the boot process.
1772 if (!selftests_can_run)
1773 return save_selftest(type);
1776 * Run a selftest on this tracer.
1777 * Here we reset the trace buffer, and set the current
1778 * tracer to be this tracer. The tracer can then run some
1779 * internal tracing to verify that everything is in order.
1780 * If we fail, we do not register this tracer.
1782 tracing_reset_online_cpus(&tr->array_buffer);
1784 tr->current_trace = type;
1786 #ifdef CONFIG_TRACER_MAX_TRACE
1787 if (type->use_max_tr) {
1788 /* If we expanded the buffers, make sure the max is expanded too */
1789 if (ring_buffer_expanded)
1790 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1791 RING_BUFFER_ALL_CPUS);
1792 tr->allocated_snapshot = true;
1796 /* the test is responsible for initializing and enabling */
1797 pr_info("Testing tracer %s: ", type->name);
1798 ret = type->selftest(type, tr);
1799 /* the test is responsible for resetting too */
1800 tr->current_trace = saved_tracer;
1802 printk(KERN_CONT "FAILED!\n");
1803 /* Add the warning after printing 'FAILED' */
1807 /* Only reset on passing, to avoid touching corrupted buffers */
1808 tracing_reset_online_cpus(&tr->array_buffer);
1810 #ifdef CONFIG_TRACER_MAX_TRACE
1811 if (type->use_max_tr) {
1812 tr->allocated_snapshot = false;
1814 /* Shrink the max buffer again */
1815 if (ring_buffer_expanded)
1816 ring_buffer_resize(tr->max_buffer.buffer, 1,
1817 RING_BUFFER_ALL_CPUS);
1821 printk(KERN_CONT "PASSED\n");
1825 static __init int init_trace_selftests(void)
1827 struct trace_selftests *p, *n;
1828 struct tracer *t, **last;
1831 selftests_can_run = true;
1833 mutex_lock(&trace_types_lock);
1835 if (list_empty(&postponed_selftests))
1838 pr_info("Running postponed tracer tests:\n");
1840 tracing_selftest_running = true;
1841 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1842 /* This loop can take minutes when sanitizers are enabled, so
1843 * let's make sure we allow RCU processing.
1846 ret = run_tracer_selftest(p->type);
1847 /* If the test fails, then warn and remove from available_tracers */
1849 WARN(1, "tracer: %s failed selftest, disabling\n",
1851 last = &trace_types;
1852 for (t = trace_types; t; t = t->next) {
1863 tracing_selftest_running = false;
1866 mutex_unlock(&trace_types_lock);
1870 core_initcall(init_trace_selftests);
1872 static inline int run_tracer_selftest(struct tracer *type)
1876 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1878 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1880 static void __init apply_trace_boot_options(void);
1883 * register_tracer - register a tracer with the ftrace system.
1884 * @type: the plugin for the tracer
1886 * Register a new plugin tracer.
1888 int __init register_tracer(struct tracer *type)
1894 pr_info("Tracer must have a name\n");
1898 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1899 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1903 if (security_locked_down(LOCKDOWN_TRACEFS)) {
1904 pr_warn("Can not register tracer %s due to lockdown\n",
1909 mutex_lock(&trace_types_lock);
1911 tracing_selftest_running = true;
1913 for (t = trace_types; t; t = t->next) {
1914 if (strcmp(type->name, t->name) == 0) {
1916 pr_info("Tracer %s already registered\n",
1923 if (!type->set_flag)
1924 type->set_flag = &dummy_set_flag;
1926 /* Allocate a dummy tracer_flags */
1927 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1932 type->flags->val = 0;
1933 type->flags->opts = dummy_tracer_opt;
1935 if (!type->flags->opts)
1936 type->flags->opts = dummy_tracer_opt;
1938 /* store the tracer for __set_tracer_option */
1939 type->flags->trace = type;
1941 ret = run_tracer_selftest(type);
1945 type->next = trace_types;
1947 add_tracer_options(&global_trace, type);
1950 tracing_selftest_running = false;
1951 mutex_unlock(&trace_types_lock);
1953 if (ret || !default_bootup_tracer)
1956 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1959 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1960 /* Do we want this tracer to start on bootup? */
1961 tracing_set_tracer(&global_trace, type->name);
1962 default_bootup_tracer = NULL;
1964 apply_trace_boot_options();
1966 /* disable other selftests, since this will break it. */
1967 tracing_selftest_disabled = true;
1968 #ifdef CONFIG_FTRACE_STARTUP_TEST
1969 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1977 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
1979 struct trace_buffer *buffer = buf->buffer;
1984 ring_buffer_record_disable(buffer);
1986 /* Make sure all commits have finished */
1988 ring_buffer_reset_cpu(buffer, cpu);
1990 ring_buffer_record_enable(buffer);
1993 void tracing_reset_online_cpus(struct array_buffer *buf)
1995 struct trace_buffer *buffer = buf->buffer;
2001 ring_buffer_record_disable(buffer);
2003 /* Make sure all commits have finished */
2006 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2008 for_each_online_cpu(cpu)
2009 ring_buffer_reset_cpu(buffer, cpu);
2011 ring_buffer_record_enable(buffer);
2014 /* Must have trace_types_lock held */
2015 void tracing_reset_all_online_cpus(void)
2017 struct trace_array *tr;
2019 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2020 if (!tr->clear_trace)
2022 tr->clear_trace = false;
2023 tracing_reset_online_cpus(&tr->array_buffer);
2024 #ifdef CONFIG_TRACER_MAX_TRACE
2025 tracing_reset_online_cpus(&tr->max_buffer);
2030 static int *tgid_map;
2032 #define SAVED_CMDLINES_DEFAULT 128
2033 #define NO_CMDLINE_MAP UINT_MAX
2034 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2035 struct saved_cmdlines_buffer {
2036 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2037 unsigned *map_cmdline_to_pid;
2038 unsigned cmdline_num;
2040 char *saved_cmdlines;
2042 static struct saved_cmdlines_buffer *savedcmd;
2044 /* temporary disable recording */
2045 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2047 static inline char *get_saved_cmdlines(int idx)
2049 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2052 static inline void set_cmdline(int idx, const char *cmdline)
2054 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2057 static int allocate_cmdlines_buffer(unsigned int val,
2058 struct saved_cmdlines_buffer *s)
2060 s->map_cmdline_to_pid = kmalloc_array(val,
2061 sizeof(*s->map_cmdline_to_pid),
2063 if (!s->map_cmdline_to_pid)
2066 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2067 if (!s->saved_cmdlines) {
2068 kfree(s->map_cmdline_to_pid);
2073 s->cmdline_num = val;
2074 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2075 sizeof(s->map_pid_to_cmdline));
2076 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2077 val * sizeof(*s->map_cmdline_to_pid));
2082 static int trace_create_savedcmd(void)
2086 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2090 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2100 int is_tracing_stopped(void)
2102 return global_trace.stop_count;
2106 * tracing_start - quick start of the tracer
2108 * If tracing is enabled but was stopped by tracing_stop,
2109 * this will start the tracer back up.
2111 void tracing_start(void)
2113 struct trace_buffer *buffer;
2114 unsigned long flags;
2116 if (tracing_disabled)
2119 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2120 if (--global_trace.stop_count) {
2121 if (global_trace.stop_count < 0) {
2122 /* Someone screwed up their debugging */
2124 global_trace.stop_count = 0;
2129 /* Prevent the buffers from switching */
2130 arch_spin_lock(&global_trace.max_lock);
2132 buffer = global_trace.array_buffer.buffer;
2134 ring_buffer_record_enable(buffer);
2136 #ifdef CONFIG_TRACER_MAX_TRACE
2137 buffer = global_trace.max_buffer.buffer;
2139 ring_buffer_record_enable(buffer);
2142 arch_spin_unlock(&global_trace.max_lock);
2145 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2148 static void tracing_start_tr(struct trace_array *tr)
2150 struct trace_buffer *buffer;
2151 unsigned long flags;
2153 if (tracing_disabled)
2156 /* If global, we need to also start the max tracer */
2157 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2158 return tracing_start();
2160 raw_spin_lock_irqsave(&tr->start_lock, flags);
2162 if (--tr->stop_count) {
2163 if (tr->stop_count < 0) {
2164 /* Someone screwed up their debugging */
2171 buffer = tr->array_buffer.buffer;
2173 ring_buffer_record_enable(buffer);
2176 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2180 * tracing_stop - quick stop of the tracer
2182 * Light weight way to stop tracing. Use in conjunction with
2185 void tracing_stop(void)
2187 struct trace_buffer *buffer;
2188 unsigned long flags;
2190 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2191 if (global_trace.stop_count++)
2194 /* Prevent the buffers from switching */
2195 arch_spin_lock(&global_trace.max_lock);
2197 buffer = global_trace.array_buffer.buffer;
2199 ring_buffer_record_disable(buffer);
2201 #ifdef CONFIG_TRACER_MAX_TRACE
2202 buffer = global_trace.max_buffer.buffer;
2204 ring_buffer_record_disable(buffer);
2207 arch_spin_unlock(&global_trace.max_lock);
2210 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2213 static void tracing_stop_tr(struct trace_array *tr)
2215 struct trace_buffer *buffer;
2216 unsigned long flags;
2218 /* If global, we need to also stop the max tracer */
2219 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2220 return tracing_stop();
2222 raw_spin_lock_irqsave(&tr->start_lock, flags);
2223 if (tr->stop_count++)
2226 buffer = tr->array_buffer.buffer;
2228 ring_buffer_record_disable(buffer);
2231 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2234 static int trace_save_cmdline(struct task_struct *tsk)
2238 /* treat recording of idle task as a success */
2242 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2246 * It's not the end of the world if we don't get
2247 * the lock, but we also don't want to spin
2248 * nor do we want to disable interrupts,
2249 * so if we miss here, then better luck next time.
2251 if (!arch_spin_trylock(&trace_cmdline_lock))
2254 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2255 if (idx == NO_CMDLINE_MAP) {
2256 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2259 * Check whether the cmdline buffer at idx has a pid
2260 * mapped. We are going to overwrite that entry so we
2261 * need to clear the map_pid_to_cmdline. Otherwise we
2262 * would read the new comm for the old pid.
2264 pid = savedcmd->map_cmdline_to_pid[idx];
2265 if (pid != NO_CMDLINE_MAP)
2266 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2268 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2269 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2271 savedcmd->cmdline_idx = idx;
2274 set_cmdline(idx, tsk->comm);
2276 arch_spin_unlock(&trace_cmdline_lock);
2281 static void __trace_find_cmdline(int pid, char comm[])
2286 strcpy(comm, "<idle>");
2290 if (WARN_ON_ONCE(pid < 0)) {
2291 strcpy(comm, "<XXX>");
2295 if (pid > PID_MAX_DEFAULT) {
2296 strcpy(comm, "<...>");
2300 map = savedcmd->map_pid_to_cmdline[pid];
2301 if (map != NO_CMDLINE_MAP)
2302 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2304 strcpy(comm, "<...>");
2307 void trace_find_cmdline(int pid, char comm[])
2310 arch_spin_lock(&trace_cmdline_lock);
2312 __trace_find_cmdline(pid, comm);
2314 arch_spin_unlock(&trace_cmdline_lock);
2318 int trace_find_tgid(int pid)
2320 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2323 return tgid_map[pid];
2326 static int trace_save_tgid(struct task_struct *tsk)
2328 /* treat recording of idle task as a success */
2332 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2335 tgid_map[tsk->pid] = tsk->tgid;
2339 static bool tracing_record_taskinfo_skip(int flags)
2341 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2343 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2345 if (!__this_cpu_read(trace_taskinfo_save))
2351 * tracing_record_taskinfo - record the task info of a task
2353 * @task: task to record
2354 * @flags: TRACE_RECORD_CMDLINE for recording comm
2355 * TRACE_RECORD_TGID for recording tgid
2357 void tracing_record_taskinfo(struct task_struct *task, int flags)
2361 if (tracing_record_taskinfo_skip(flags))
2365 * Record as much task information as possible. If some fail, continue
2366 * to try to record the others.
2368 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2369 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2371 /* If recording any information failed, retry again soon. */
2375 __this_cpu_write(trace_taskinfo_save, false);
2379 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2381 * @prev: previous task during sched_switch
2382 * @next: next task during sched_switch
2383 * @flags: TRACE_RECORD_CMDLINE for recording comm
2384 * TRACE_RECORD_TGID for recording tgid
2386 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2387 struct task_struct *next, int flags)
2391 if (tracing_record_taskinfo_skip(flags))
2395 * Record as much task information as possible. If some fail, continue
2396 * to try to record the others.
2398 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2399 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2400 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2401 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2403 /* If recording any information failed, retry again soon. */
2407 __this_cpu_write(trace_taskinfo_save, false);
2410 /* Helpers to record a specific task information */
2411 void tracing_record_cmdline(struct task_struct *task)
2413 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2416 void tracing_record_tgid(struct task_struct *task)
2418 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2422 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2423 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2424 * simplifies those functions and keeps them in sync.
2426 enum print_line_t trace_handle_return(struct trace_seq *s)
2428 return trace_seq_has_overflowed(s) ?
2429 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2431 EXPORT_SYMBOL_GPL(trace_handle_return);
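/*
 * Typical use in an event output callback (illustrative, the field names
 * are made up):
 *
 *	trace_seq_printf(s, "%s: %d\n", field->name, field->val);
 *	return trace_handle_return(s);
 */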
2434 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2435 unsigned long flags, int pc)
2437 struct task_struct *tsk = current;
2439 entry->preempt_count = pc & 0xff;
2440 entry->pid = (tsk) ? tsk->pid : 0;
2443 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2444 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2446 TRACE_FLAG_IRQS_NOSUPPORT |
2448 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2449 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2450 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2451 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2452 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2454 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2456 struct ring_buffer_event *
2457 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2460 unsigned long flags, int pc)
2462 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2465 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2466 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2467 static int trace_buffered_event_ref;
2470 * trace_buffered_event_enable - enable buffering events
2472 * When events are being filtered, it is quicker to use a temporary
2473 * buffer to write the event data into if there's a likely chance
2474 * that it will not be committed. The discard of the ring buffer
2475 * is not as fast as committing, and is much slower than copying
2478 * When an event is to be filtered, allocate per cpu buffers to
2479 * write the event data into, and if the event is filtered and discarded
2480 * it is simply dropped, otherwise, the entire data is to be committed
2483 void trace_buffered_event_enable(void)
2485 struct ring_buffer_event *event;
2489 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2491 if (trace_buffered_event_ref++)
2494 for_each_tracing_cpu(cpu) {
2495 page = alloc_pages_node(cpu_to_node(cpu),
2496 GFP_KERNEL | __GFP_NORETRY, 0);
2500 event = page_address(page);
2501 memset(event, 0, sizeof(*event));
2503 per_cpu(trace_buffered_event, cpu) = event;
2506 if (cpu == smp_processor_id() &&
2507 this_cpu_read(trace_buffered_event) !=
2508 per_cpu(trace_buffered_event, cpu))
2515 trace_buffered_event_disable();
2518 static void enable_trace_buffered_event(void *data)
2520 /* Probably not needed, but do it anyway */
2522 this_cpu_dec(trace_buffered_event_cnt);
2525 static void disable_trace_buffered_event(void *data)
2527 this_cpu_inc(trace_buffered_event_cnt);
2531 * trace_buffered_event_disable - disable buffering events
2533 * When a filter is removed, it is faster to not use the buffered
2534 * events, and to commit directly into the ring buffer. Free up
2535 * the temp buffers when there are no more users. This requires
2536 * special synchronization with current events.
2538 void trace_buffered_event_disable(void)
2542 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2544 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2547 if (--trace_buffered_event_ref)
2551 /* For each CPU, set the buffer as used. */
2552 smp_call_function_many(tracing_buffer_mask,
2553 disable_trace_buffered_event, NULL, 1);
2556 /* Wait for all current users to finish */
2559 for_each_tracing_cpu(cpu) {
2560 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2561 per_cpu(trace_buffered_event, cpu) = NULL;
2564 * Make sure trace_buffered_event is NULL before clearing
2565 * trace_buffered_event_cnt.
2570 /* Do the work on each cpu */
2571 smp_call_function_many(tracing_buffer_mask,
2572 enable_trace_buffered_event, NULL, 1);
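/*
 * temp_buffer is only used by trace_event_buffer_lock_reserve() below,
 * when tracing is off but an event still has triggers that need to see
 * its data.
 */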
2576 static struct trace_buffer *temp_buffer;
2578 struct ring_buffer_event *
2579 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2580 struct trace_event_file *trace_file,
2581 int type, unsigned long len,
2582 unsigned long flags, int pc)
2584 struct ring_buffer_event *entry;
2587 *current_rb = trace_file->tr->array_buffer.buffer;
2589 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2590 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2591 (entry = this_cpu_read(trace_buffered_event))) {
2592 /* Try to use the per cpu buffer first */
2593 val = this_cpu_inc_return(trace_buffered_event_cnt);
2595 trace_event_setup(entry, type, flags, pc);
2596 entry->array[0] = len;
2599 this_cpu_dec(trace_buffered_event_cnt);
2602 entry = __trace_buffer_lock_reserve(*current_rb,
2603 type, len, flags, pc);
2605 * If tracing is off, but we have triggers enabled
2606 * we still need to look at the event data. Use the temp_buffer
2607 * to store the trace event for the trigger to use. It's recursion
2608 * safe and will not be recorded anywhere.
2610 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2611 *current_rb = temp_buffer;
2612 entry = __trace_buffer_lock_reserve(*current_rb,
2613 type, len, flags, pc);
2617 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2619 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2620 static DEFINE_MUTEX(tracepoint_printk_mutex);
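/*
 * Mirror a single trace event to printk().  Only used when
 * tracepoint_printk is enabled; see tracepoint_printk_sysctl() below.
 */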
2622 static void output_printk(struct trace_event_buffer *fbuffer)
2624 struct trace_event_call *event_call;
2625 struct trace_event_file *file;
2626 struct trace_event *event;
2627 unsigned long flags;
2628 struct trace_iterator *iter = tracepoint_print_iter;
2630 /* We should never get here if iter is NULL */
2631 if (WARN_ON_ONCE(!iter))
2634 event_call = fbuffer->trace_file->event_call;
2635 if (!event_call || !event_call->event.funcs ||
2636 !event_call->event.funcs->trace)
2639 file = fbuffer->trace_file;
2640 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2641 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2642 !filter_match_preds(file->filter, fbuffer->entry)))
2645 event = &fbuffer->trace_file->event_call->event;
2647 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2648 trace_seq_init(&iter->seq);
2649 iter->ent = fbuffer->entry;
2650 event_call->event.funcs->trace(iter, 0, event);
2651 trace_seq_putc(&iter->seq, 0);
2652 printk("%s", iter->seq.buffer);
2654 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2657 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2658 void __user *buffer, size_t *lenp,
2661 int save_tracepoint_printk;
2664 mutex_lock(&tracepoint_printk_mutex);
2665 save_tracepoint_printk = tracepoint_printk;
2667 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2670 * This will force exiting early, as tracepoint_printk
2671 * is always zero when tracepoint_print_iter is not allocated
2673 if (!tracepoint_print_iter)
2674 tracepoint_printk = 0;
2676 if (save_tracepoint_printk == tracepoint_printk)
2679 if (tracepoint_printk)
2680 static_key_enable(&tracepoint_printk_key.key);
2682 static_key_disable(&tracepoint_printk_key.key);
2685 mutex_unlock(&tracepoint_printk_mutex);
2690 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2692 if (static_key_false(&tracepoint_printk_key.key))
2693 output_printk(fbuffer);
2695 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2696 fbuffer->event, fbuffer->entry,
2697 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2699 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2704 * trace_buffer_unlock_commit_regs()
2705 * trace_event_buffer_commit()
2706 * trace_event_raw_event_xxx()
2708 # define STACK_SKIP 3
2710 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2711 struct trace_buffer *buffer,
2712 struct ring_buffer_event *event,
2713 unsigned long flags, int pc,
2714 struct pt_regs *regs)
2716 __buffer_unlock_commit(buffer, event);
2719 * If regs is not set, then skip the necessary functions.
2720 * Note, we can still get here via blktrace, wakeup tracer
2721 * and mmiotrace, but that's ok if they lose a function or
2722 * two. They are not that meaningful.
2724 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2725 ftrace_trace_userstack(buffer, flags, pc);
2729 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2732 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2733 struct ring_buffer_event *event)
2735 __buffer_unlock_commit(buffer, event);
2739 trace_process_export(struct trace_export *export,
2740 struct ring_buffer_event *event)
2742 struct trace_entry *entry;
2743 unsigned int size = 0;
2745 entry = ring_buffer_event_data(event);
2746 size = ring_buffer_event_length(event);
2747 export->write(export, entry, size);
2750 static DEFINE_MUTEX(ftrace_export_lock);
2752 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2754 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2756 static inline void ftrace_exports_enable(void)
2758 static_branch_enable(&ftrace_exports_enabled);
2761 static inline void ftrace_exports_disable(void)
2763 static_branch_disable(&ftrace_exports_enabled);
2766 static void ftrace_exports(struct ring_buffer_event *event)
2768 struct trace_export *export;
2770 preempt_disable_notrace();
2772 export = rcu_dereference_raw_check(ftrace_exports_list);
2774 trace_process_export(export, event);
2775 export = rcu_dereference_raw_check(export->next);
2778 preempt_enable_notrace();
2782 add_trace_export(struct trace_export **list, struct trace_export *export)
2784 rcu_assign_pointer(export->next, *list);
2786 * We are entering export into the list but another
2787 * CPU might be walking that list. We need to make sure
2788 * the export->next pointer is valid before another CPU sees
2789 * the export pointer included into the list.
2791 rcu_assign_pointer(*list, export);
2795 rm_trace_export(struct trace_export **list, struct trace_export *export)
2797 struct trace_export **p;
2799 for (p = list; *p != NULL; p = &(*p)->next)
2806 rcu_assign_pointer(*p, (*p)->next);
2812 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2815 ftrace_exports_enable();
2817 add_trace_export(list, export);
2821 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2825 ret = rm_trace_export(list, export);
2827 ftrace_exports_disable();
2832 int register_ftrace_export(struct trace_export *export)
2834 if (WARN_ON_ONCE(!export->write))
2837 mutex_lock(&ftrace_export_lock);
2839 add_ftrace_export(&ftrace_exports_list, export);
2841 mutex_unlock(&ftrace_export_lock);
2845 EXPORT_SYMBOL_GPL(register_ftrace_export);
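/*
 * Minimal usage sketch (illustrative; the names and argument types here
 * are only a sketch, see trace_process_export() above for the actual
 * ->write() call):
 *
 *	static void my_export_write(struct trace_export *export,
 *				    void *entry, unsigned int size)
 *	{
 *		// push the raw trace_entry bytes to another transport
 *	}
 *
 *	static struct trace_export my_export = { .write = my_export_write };
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */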
2847 int unregister_ftrace_export(struct trace_export *export)
2851 mutex_lock(&ftrace_export_lock);
2853 ret = rm_ftrace_export(&ftrace_exports_list, export);
2855 mutex_unlock(&ftrace_export_lock);
2859 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
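/*
 * Record a single function call (ip and its caller parent_ip) into the
 * trace buffer of @tr, honoring event filters and any registered ftrace
 * exports.
 */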
2862 trace_function(struct trace_array *tr,
2863 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2866 struct trace_event_call *call = &event_function;
2867 struct trace_buffer *buffer = tr->array_buffer.buffer;
2868 struct ring_buffer_event *event;
2869 struct ftrace_entry *entry;
2871 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2875 entry = ring_buffer_event_data(event);
2877 entry->parent_ip = parent_ip;
2879 if (!call_filter_check_discard(call, entry, buffer, event)) {
2880 if (static_branch_unlikely(&ftrace_exports_enabled))
2881 ftrace_exports(event);
2882 __buffer_unlock_commit(buffer, event);
2886 #ifdef CONFIG_STACKTRACE
2888 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2889 #define FTRACE_KSTACK_NESTING 4
2891 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
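/*
 * With 4K pages and 64-bit longs this gives 1024 saved addresses per
 * nesting level, i.e. roughly 32K of per-cpu storage for ftrace_stacks
 * below.
 */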
2893 struct ftrace_stack {
2894 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2898 struct ftrace_stacks {
2899 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2902 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2903 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2905 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2906 unsigned long flags,
2907 int skip, int pc, struct pt_regs *regs)
2909 struct trace_event_call *call = &event_kernel_stack;
2910 struct ring_buffer_event *event;
2911 unsigned int size, nr_entries;
2912 struct ftrace_stack *fstack;
2913 struct stack_entry *entry;
2917 * Add one, for this function and the call to save_stack_trace()
2918 * If regs is set, then these functions will not be in the way.
2920 #ifndef CONFIG_UNWINDER_ORC
2926 * Since events can happen in NMIs there's no safe way to
2927 * use a single per cpu buffer. Instead we reserve one of the
2928 * per cpu nesting slots; an interrupt or NMI that comes in while
2929 * it is held simply uses the next slot, up to FTRACE_KSTACK_NESTING deep.
2931 preempt_disable_notrace();
2933 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2935 /* This should never happen. If it does, yell once and skip */
2936 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2940 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2941 * interrupt will either see the value pre increment or post
2942 * increment. If the interrupt happens pre increment it will have
2943 * restored the counter when it returns. We just need a barrier to
2944 * keep gcc from moving things around.
2948 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2949 size = ARRAY_SIZE(fstack->calls);
2952 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2955 nr_entries = stack_trace_save(fstack->calls, size, skip);
2958 size = nr_entries * sizeof(unsigned long);
2959 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2960 sizeof(*entry) + size, flags, pc);
2963 entry = ring_buffer_event_data(event);
2965 memcpy(&entry->caller, fstack->calls, size);
2966 entry->size = nr_entries;
2968 if (!call_filter_check_discard(call, entry, buffer, event))
2969 __buffer_unlock_commit(buffer, event);
2972 /* Again, don't let gcc optimize things here */
2974 __this_cpu_dec(ftrace_stack_reserve);
2975 preempt_enable_notrace();
2979 static inline void ftrace_trace_stack(struct trace_array *tr,
2980 struct trace_buffer *buffer,
2981 unsigned long flags,
2982 int skip, int pc, struct pt_regs *regs)
2984 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2987 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2990 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2993 struct trace_buffer *buffer = tr->array_buffer.buffer;
2995 if (rcu_is_watching()) {
2996 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3001 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3002 * but if the above rcu_is_watching() failed, then the NMI
3003 * triggered someplace critical, and rcu_irq_enter() should
3004 * not be called from NMI.
3006 if (unlikely(in_nmi()))
3009 rcu_irq_enter_irqson();
3010 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3011 rcu_irq_exit_irqson();
3015 * trace_dump_stack - record a stack back trace in the trace buffer
3016 * @skip: Number of functions to skip (helper handlers)
3018 void trace_dump_stack(int skip)
3020 unsigned long flags;
3022 if (tracing_disabled || tracing_selftest_running)
3025 local_save_flags(flags);
3027 #ifndef CONFIG_UNWINDER_ORC
3028 /* Skip 1 to skip this function. */
3031 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3032 flags, skip, preempt_count(), NULL);
3034 EXPORT_SYMBOL_GPL(trace_dump_stack);
3036 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3037 static DEFINE_PER_CPU(int, user_stack_count);
3040 ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
3042 struct trace_event_call *call = &event_user_stack;
3043 struct ring_buffer_event *event;
3044 struct userstack_entry *entry;
3046 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
3050 * NMIs can not handle page faults, even with fix ups.
3051 * Saving the user stack can (and often does) fault.
3053 if (unlikely(in_nmi()))
3057 * prevent recursion, since the user stack tracing may
3058 * trigger other kernel events.
3061 if (__this_cpu_read(user_stack_count))
3064 __this_cpu_inc(user_stack_count);
3066 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3067 sizeof(*entry), flags, pc);
3069 goto out_drop_count;
3070 entry = ring_buffer_event_data(event);
3072 entry->tgid = current->tgid;
3073 memset(&entry->caller, 0, sizeof(entry->caller));
3075 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3076 if (!call_filter_check_discard(call, entry, buffer, event))
3077 __buffer_unlock_commit(buffer, event);
3080 __this_cpu_dec(user_stack_count);
3084 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3085 static void ftrace_trace_userstack(struct trace_buffer *buffer,
3086 unsigned long flags, int pc)
3089 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3091 #endif /* CONFIG_STACKTRACE */
3093 /* created for use with alloc_percpu */
3094 struct trace_buffer_struct {
3096 char buffer[4][TRACE_BUF_SIZE];
3099 static struct trace_buffer_struct *trace_percpu_buffer;
3102 * This allows for lockless recording. If we're nested too deeply, then
3103 * this returns NULL.
3105 static char *get_trace_buf(void)
3107 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3109 if (!buffer || buffer->nesting >= 4)
3114 /* Interrupts must see nesting incremented before we use the buffer */
3116 return &buffer->buffer[buffer->nesting][0];
3119 static void put_trace_buf(void)
3121 /* Don't let the decrement of nesting leak before this */
3123 this_cpu_dec(trace_percpu_buffer->nesting);
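/*
 * Callers bracket the buffer use with preemption disabled, e.g. (as
 * trace_vbprintk() below does):
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format the message into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */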
3126 static int alloc_percpu_trace_buffer(void)
3128 struct trace_buffer_struct *buffers;
3130 buffers = alloc_percpu(struct trace_buffer_struct);
3131 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3134 trace_percpu_buffer = buffers;
3138 static int buffers_allocated;
3140 void trace_printk_init_buffers(void)
3142 if (buffers_allocated)
3145 if (alloc_percpu_trace_buffer())
3148 /* trace_printk() is for debug use only. Don't use it in production. */
3151 pr_warn("**********************************************************\n");
3152 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3154 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3156 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3157 pr_warn("** unsafe for production use. **\n");
3159 pr_warn("** If you see this message and you are not debugging **\n");
3160 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3162 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3163 pr_warn("**********************************************************\n");
3165 /* Expand the buffers to set size */
3166 tracing_update_buffers();
3168 buffers_allocated = 1;
3171 * trace_printk_init_buffers() can be called by modules.
3172 * If that happens, then we need to start cmdline recording
3173 * directly here. If the global_trace.buffer is already
3174 * allocated here, then this was called by module code.
3176 if (global_trace.array_buffer.buffer)
3177 tracing_start_cmdline_record();
3179 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3181 void trace_printk_start_comm(void)
3183 /* Start tracing comms if trace printk is set */
3184 if (!buffers_allocated)
3186 tracing_start_cmdline_record();
3189 static void trace_printk_start_stop_comm(int enabled)
3191 if (!buffers_allocated)
3195 tracing_start_cmdline_record();
3197 tracing_stop_cmdline_record();
3201 * trace_vbprintk - write binary msg to tracing buffer
3202 * @ip: The address of the caller
3203 * @fmt: The string format to write to the buffer
3204 * @args: Arguments for @fmt
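 *
 * The format is not expanded here: vbin_printf() saves the raw argument
 * values and the TRACE_BPRINT event keeps a pointer to @fmt, so the
 * string is only rendered when the buffer is read.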
3206 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3208 struct trace_event_call *call = &event_bprint;
3209 struct ring_buffer_event *event;
3210 struct trace_buffer *buffer;
3211 struct trace_array *tr = &global_trace;
3212 struct bprint_entry *entry;
3213 unsigned long flags;
3215 int len = 0, size, pc;
3217 if (unlikely(tracing_selftest_running || tracing_disabled))
3220 /* Don't pollute graph traces with trace_vprintk internals */
3221 pause_graph_tracing();
3223 pc = preempt_count();
3224 preempt_disable_notrace();
3226 tbuffer = get_trace_buf();
3232 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3234 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3237 local_save_flags(flags);
3238 size = sizeof(*entry) + sizeof(u32) * len;
3239 buffer = tr->array_buffer.buffer;
3240 ring_buffer_nest_start(buffer);
3241 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3245 entry = ring_buffer_event_data(event);
3249 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3250 if (!call_filter_check_discard(call, entry, buffer, event)) {
3251 __buffer_unlock_commit(buffer, event);
3252 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3256 ring_buffer_nest_end(buffer);
3261 preempt_enable_notrace();
3262 unpause_graph_tracing();
3266 EXPORT_SYMBOL_GPL(trace_vbprintk);
3270 __trace_array_vprintk(struct trace_buffer *buffer,
3271 unsigned long ip, const char *fmt, va_list args)
3273 struct trace_event_call *call = &event_print;
3274 struct ring_buffer_event *event;
3275 int len = 0, size, pc;
3276 struct print_entry *entry;
3277 unsigned long flags;
3280 if (tracing_disabled || tracing_selftest_running)
3283 /* Don't pollute graph traces with trace_vprintk internals */
3284 pause_graph_tracing();
3286 pc = preempt_count();
3287 preempt_disable_notrace();
3290 tbuffer = get_trace_buf();
3296 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3298 local_save_flags(flags);
3299 size = sizeof(*entry) + len + 1;
3300 ring_buffer_nest_start(buffer);
3301 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3305 entry = ring_buffer_event_data(event);
3308 memcpy(&entry->buf, tbuffer, len + 1);
3309 if (!call_filter_check_discard(call, entry, buffer, event)) {
3310 __buffer_unlock_commit(buffer, event);
3311 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3315 ring_buffer_nest_end(buffer);
3319 preempt_enable_notrace();
3320 unpause_graph_tracing();
3326 int trace_array_vprintk(struct trace_array *tr,
3327 unsigned long ip, const char *fmt, va_list args)
3329 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3333 int trace_array_printk(struct trace_array *tr,
3334 unsigned long ip, const char *fmt, ...)
3339 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3346 ret = trace_array_vprintk(tr, ip, fmt, ap);
3350 EXPORT_SYMBOL_GPL(trace_array_printk);
3353 int trace_array_printk_buf(struct trace_buffer *buffer,
3354 unsigned long ip, const char *fmt, ...)
3359 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3363 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3369 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3371 return trace_array_vprintk(&global_trace, ip, fmt, args);
3373 EXPORT_SYMBOL_GPL(trace_vprintk);
3375 static void trace_iterator_increment(struct trace_iterator *iter)
3377 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3381 ring_buffer_read(buf_iter, NULL);
3384 static struct trace_entry *
3385 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3386 unsigned long *lost_events)
3388 struct ring_buffer_event *event;
3389 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3392 event = ring_buffer_iter_peek(buf_iter, ts);
3394 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3398 iter->ent_size = ring_buffer_event_length(event);
3399 return ring_buffer_event_data(event);
3405 static struct trace_entry *
3406 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3407 unsigned long *missing_events, u64 *ent_ts)
3409 struct trace_buffer *buffer = iter->array_buffer->buffer;
3410 struct trace_entry *ent, *next = NULL;
3411 unsigned long lost_events = 0, next_lost = 0;
3412 int cpu_file = iter->cpu_file;
3413 u64 next_ts = 0, ts;
3419 * If we are in a per_cpu trace file, don't bother iterating over
3420 * all CPUs; peek at that one directly.
3422 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3423 if (ring_buffer_empty_cpu(buffer, cpu_file))
3425 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3427 *ent_cpu = cpu_file;
3432 for_each_tracing_cpu(cpu) {
3434 if (ring_buffer_empty_cpu(buffer, cpu))
3437 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3440 * Pick the entry with the smallest timestamp:
3442 if (ent && (!next || ts < next_ts)) {
3446 next_lost = lost_events;
3447 next_size = iter->ent_size;
3451 iter->ent_size = next_size;
3454 *ent_cpu = next_cpu;
3460 *missing_events = next_lost;
3465 /* Find the next real entry, without updating the iterator itself */
3466 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3467 int *ent_cpu, u64 *ent_ts)
3469 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3472 /* Find the next real entry, and increment the iterator to the next entry */
3473 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3475 iter->ent = __find_next_entry(iter, &iter->cpu,
3476 &iter->lost_events, &iter->ts);
3479 trace_iterator_increment(iter);
3481 return iter->ent ? iter : NULL;
3484 static void trace_consume(struct trace_iterator *iter)
3486 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3487 &iter->lost_events);
3490 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3492 struct trace_iterator *iter = m->private;
3496 WARN_ON_ONCE(iter->leftover);
3500 /* can't go backwards */
3505 ent = trace_find_next_entry_inc(iter);
3509 while (ent && iter->idx < i)
3510 ent = trace_find_next_entry_inc(iter);
3517 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3519 struct ring_buffer_event *event;
3520 struct ring_buffer_iter *buf_iter;
3521 unsigned long entries = 0;
3524 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3526 buf_iter = trace_buffer_iter(iter, cpu);
3530 ring_buffer_iter_reset(buf_iter);
3533 * With the max latency tracers, a cpu may never have had its
3534 * buffer reset. This shows up as timestamps that are before the
3535 * start of the buffer.
3537 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3538 if (ts >= iter->array_buffer->time_start)
3541 ring_buffer_read(buf_iter, NULL);
3544 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3548 * The current tracer is copied to avoid global locking all around.
3551 static void *s_start(struct seq_file *m, loff_t *pos)
3553 struct trace_iterator *iter = m->private;
3554 struct trace_array *tr = iter->tr;
3555 int cpu_file = iter->cpu_file;
3561 * copy the tracer to avoid using a global lock all around.
3562 * iter->trace is a copy of current_trace, the pointer to the
3563 * name may be used instead of a strcmp(), as iter->trace->name
3564 * will point to the same string as current_trace->name.
3566 mutex_lock(&trace_types_lock);
3567 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3568 *iter->trace = *tr->current_trace;
3569 mutex_unlock(&trace_types_lock);
3571 #ifdef CONFIG_TRACER_MAX_TRACE
3572 if (iter->snapshot && iter->trace->use_max_tr)
3573 return ERR_PTR(-EBUSY);
3576 if (!iter->snapshot)
3577 atomic_inc(&trace_record_taskinfo_disabled);
3579 if (*pos != iter->pos) {
3584 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3585 for_each_tracing_cpu(cpu)
3586 tracing_iter_reset(iter, cpu);
3588 tracing_iter_reset(iter, cpu_file);
3591 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3596 * If we overflowed the seq_file before, then we want
3597 * to just reuse the trace_seq buffer again.
3603 p = s_next(m, p, &l);
3607 trace_event_read_lock();
3608 trace_access_lock(cpu_file);
3612 static void s_stop(struct seq_file *m, void *p)
3614 struct trace_iterator *iter = m->private;
3616 #ifdef CONFIG_TRACER_MAX_TRACE
3617 if (iter->snapshot && iter->trace->use_max_tr)
3621 if (!iter->snapshot)
3622 atomic_dec(&trace_record_taskinfo_disabled);
3624 trace_access_unlock(iter->cpu_file);
3625 trace_event_read_unlock();
3629 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3630 unsigned long *entries, int cpu)
3632 unsigned long count;
3634 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3636 * If this buffer has skipped entries, then we hold all
3637 * entries for the trace and we need to ignore the
3638 * ones before the time stamp.
3640 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3641 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3642 /* total is the same as the entries */
3646 ring_buffer_overrun_cpu(buf->buffer, cpu);
3651 get_total_entries(struct array_buffer *buf,
3652 unsigned long *total, unsigned long *entries)
3660 for_each_tracing_cpu(cpu) {
3661 get_total_entries_cpu(buf, &t, &e, cpu);
3667 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3669 unsigned long total, entries;
3674 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3679 unsigned long trace_total_entries(struct trace_array *tr)
3681 unsigned long total, entries;
3686 get_total_entries(&tr->array_buffer, &total, &entries);
3691 static void print_lat_help_header(struct seq_file *m)
3693 seq_puts(m, "# _------=> CPU# \n"
3694 "# / _-----=> irqs-off \n"
3695 "# | / _----=> need-resched \n"
3696 "# || / _---=> hardirq/softirq \n"
3697 "# ||| / _--=> preempt-depth \n"
3699 "# cmd pid ||||| time | caller \n"
3700 "# \\ / ||||| \\ | / \n");
3703 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3705 unsigned long total;
3706 unsigned long entries;
3708 get_total_entries(buf, &total, &entries);
3709 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3710 entries, total, num_online_cpus());
3714 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3717 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3719 print_event_info(buf, m);
3721 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3722 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3725 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3728 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3729 const char *space = " ";
3730 int prec = tgid ? 10 : 2;
3732 print_event_info(buf, m);
3734 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3735 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3736 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3737 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3738 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3739 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3740 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3744 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3746 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3747 struct array_buffer *buf = iter->array_buffer;
3748 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3749 struct tracer *type = iter->trace;
3750 unsigned long entries;
3751 unsigned long total;
3752 const char *name = "preemption";
3756 get_total_entries(buf, &total, &entries);
3758 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3760 seq_puts(m, "# -----------------------------------"
3761 "---------------------------------\n");
3762 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3763 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3764 nsecs_to_usecs(data->saved_latency),
3768 #if defined(CONFIG_PREEMPT_NONE)
3770 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3772 #elif defined(CONFIG_PREEMPT)
3774 #elif defined(CONFIG_PREEMPT_RT)
3779 /* These are reserved for later use */
3782 seq_printf(m, " #P:%d)\n", num_online_cpus());
3786 seq_puts(m, "# -----------------\n");
3787 seq_printf(m, "# | task: %.16s-%d "
3788 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3789 data->comm, data->pid,
3790 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3791 data->policy, data->rt_priority);
3792 seq_puts(m, "# -----------------\n");
3794 if (data->critical_start) {
3795 seq_puts(m, "# => started at: ");
3796 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3797 trace_print_seq(m, &iter->seq);
3798 seq_puts(m, "\n# => ended at: ");
3799 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3800 trace_print_seq(m, &iter->seq);
3801 seq_puts(m, "\n#\n");
3807 static void test_cpu_buff_start(struct trace_iterator *iter)
3809 struct trace_seq *s = &iter->seq;
3810 struct trace_array *tr = iter->tr;
3812 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3815 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3818 if (cpumask_available(iter->started) &&
3819 cpumask_test_cpu(iter->cpu, iter->started))
3822 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3825 if (cpumask_available(iter->started))
3826 cpumask_set_cpu(iter->cpu, iter->started);
3828 /* Don't print started cpu buffer for the first entry of the trace */
3830 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3834 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3836 struct trace_array *tr = iter->tr;
3837 struct trace_seq *s = &iter->seq;
3838 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3839 struct trace_entry *entry;
3840 struct trace_event *event;
3844 test_cpu_buff_start(iter);
3846 event = ftrace_find_event(entry->type);
3848 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3849 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3850 trace_print_lat_context(iter);
3852 trace_print_context(iter);
3855 if (trace_seq_has_overflowed(s))
3856 return TRACE_TYPE_PARTIAL_LINE;
3859 return event->funcs->trace(iter, sym_flags, event);
3861 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3863 return trace_handle_return(s);
3866 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3868 struct trace_array *tr = iter->tr;
3869 struct trace_seq *s = &iter->seq;
3870 struct trace_entry *entry;
3871 struct trace_event *event;
3875 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3876 trace_seq_printf(s, "%d %d %llu ",
3877 entry->pid, iter->cpu, iter->ts);
3879 if (trace_seq_has_overflowed(s))
3880 return TRACE_TYPE_PARTIAL_LINE;
3882 event = ftrace_find_event(entry->type);
3884 return event->funcs->raw(iter, 0, event);
3886 trace_seq_printf(s, "%d ?\n", entry->type);
3888 return trace_handle_return(s);
3891 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3893 struct trace_array *tr = iter->tr;
3894 struct trace_seq *s = &iter->seq;
3895 unsigned char newline = '\n';
3896 struct trace_entry *entry;
3897 struct trace_event *event;
3901 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3902 SEQ_PUT_HEX_FIELD(s, entry->pid);
3903 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3904 SEQ_PUT_HEX_FIELD(s, iter->ts);
3905 if (trace_seq_has_overflowed(s))
3906 return TRACE_TYPE_PARTIAL_LINE;
3909 event = ftrace_find_event(entry->type);
3911 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3912 if (ret != TRACE_TYPE_HANDLED)
3916 SEQ_PUT_FIELD(s, newline);
3918 return trace_handle_return(s);
3921 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3923 struct trace_array *tr = iter->tr;
3924 struct trace_seq *s = &iter->seq;
3925 struct trace_entry *entry;
3926 struct trace_event *event;
3930 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3931 SEQ_PUT_FIELD(s, entry->pid);
3932 SEQ_PUT_FIELD(s, iter->cpu);
3933 SEQ_PUT_FIELD(s, iter->ts);
3934 if (trace_seq_has_overflowed(s))
3935 return TRACE_TYPE_PARTIAL_LINE;
3938 event = ftrace_find_event(entry->type);
3939 return event ? event->funcs->binary(iter, 0, event) :
3943 int trace_empty(struct trace_iterator *iter)
3945 struct ring_buffer_iter *buf_iter;
3948 /* If we are looking at one CPU buffer, only check that one */
3949 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3950 cpu = iter->cpu_file;
3951 buf_iter = trace_buffer_iter(iter, cpu);
3953 if (!ring_buffer_iter_empty(buf_iter))
3956 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
3962 for_each_tracing_cpu(cpu) {
3963 buf_iter = trace_buffer_iter(iter, cpu);
3965 if (!ring_buffer_iter_empty(buf_iter))
3968 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
3976 /* Called with trace_event_read_lock() held. */
3977 enum print_line_t print_trace_line(struct trace_iterator *iter)
3979 struct trace_array *tr = iter->tr;
3980 unsigned long trace_flags = tr->trace_flags;
3981 enum print_line_t ret;
3983 if (iter->lost_events) {
3984 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3985 iter->cpu, iter->lost_events);
3986 if (trace_seq_has_overflowed(&iter->seq))
3987 return TRACE_TYPE_PARTIAL_LINE;
3990 if (iter->trace && iter->trace->print_line) {
3991 ret = iter->trace->print_line(iter);
3992 if (ret != TRACE_TYPE_UNHANDLED)
3996 if (iter->ent->type == TRACE_BPUTS &&
3997 trace_flags & TRACE_ITER_PRINTK &&
3998 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3999 return trace_print_bputs_msg_only(iter);
4001 if (iter->ent->type == TRACE_BPRINT &&
4002 trace_flags & TRACE_ITER_PRINTK &&
4003 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4004 return trace_print_bprintk_msg_only(iter);
4006 if (iter->ent->type == TRACE_PRINT &&
4007 trace_flags & TRACE_ITER_PRINTK &&
4008 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4009 return trace_print_printk_msg_only(iter);
4011 if (trace_flags & TRACE_ITER_BIN)
4012 return print_bin_fmt(iter);
4014 if (trace_flags & TRACE_ITER_HEX)
4015 return print_hex_fmt(iter);
4017 if (trace_flags & TRACE_ITER_RAW)
4018 return print_raw_fmt(iter);
4020 return print_trace_fmt(iter);
4023 void trace_latency_header(struct seq_file *m)
4025 struct trace_iterator *iter = m->private;
4026 struct trace_array *tr = iter->tr;
4028 /* print nothing if the buffers are empty */
4029 if (trace_empty(iter))
4032 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4033 print_trace_header(m, iter);
4035 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4036 print_lat_help_header(m);
4039 void trace_default_header(struct seq_file *m)
4041 struct trace_iterator *iter = m->private;
4042 struct trace_array *tr = iter->tr;
4043 unsigned long trace_flags = tr->trace_flags;
4045 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4048 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4049 /* print nothing if the buffers are empty */
4050 if (trace_empty(iter))
4052 print_trace_header(m, iter);
4053 if (!(trace_flags & TRACE_ITER_VERBOSE))
4054 print_lat_help_header(m);
4056 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4057 if (trace_flags & TRACE_ITER_IRQ_INFO)
4058 print_func_help_header_irq(iter->array_buffer,
4061 print_func_help_header(iter->array_buffer, m,
4067 static void test_ftrace_alive(struct seq_file *m)
4069 if (!ftrace_is_dead())
4071 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4072 "# MAY BE MISSING FUNCTION EVENTS\n");
4075 #ifdef CONFIG_TRACER_MAX_TRACE
4076 static void show_snapshot_main_help(struct seq_file *m)
4078 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4079 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4080 "# Takes a snapshot of the main buffer.\n"
4081 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4082 "# (Doesn't have to be '2'; works with any number that\n"
4083 "# is not a '0' or '1')\n");
4086 static void show_snapshot_percpu_help(struct seq_file *m)
4088 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4089 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4090 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4091 "# Takes a snapshot of the main buffer for this cpu.\n");
4093 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4094 "# Must use main snapshot file to allocate.\n");
4096 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4097 "# (Doesn't have to be '2'; works with any number that\n"
4098 "# is not a '0' or '1')\n");
4101 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4103 if (iter->tr->allocated_snapshot)
4104 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4106 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4108 seq_puts(m, "# Snapshot commands:\n");
4109 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4110 show_snapshot_main_help(m);
4112 show_snapshot_percpu_help(m);
4115 /* Should never be called */
4116 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4119 static int s_show(struct seq_file *m, void *v)
4121 struct trace_iterator *iter = v;
4124 if (iter->ent == NULL) {
4126 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4128 test_ftrace_alive(m);
4130 if (iter->snapshot && trace_empty(iter))
4131 print_snapshot_help(m, iter);
4132 else if (iter->trace && iter->trace->print_header)
4133 iter->trace->print_header(m);
4135 trace_default_header(m);
4137 } else if (iter->leftover) {
4139 * If we filled the seq_file buffer earlier, we
4140 * want to just show it now.
4142 ret = trace_print_seq(m, &iter->seq);
4144 /* ret should this time be zero, but you never know */
4145 iter->leftover = ret;
4148 print_trace_line(iter);
4149 ret = trace_print_seq(m, &iter->seq);
4151 * If we overflow the seq_file buffer, then it will
4152 * ask us for this data again at start up.
4154 * ret is 0 if seq_file write succeeded.
4157 iter->leftover = ret;
4164 * Should be used after trace_array_get(), trace_types_lock
4165 * ensures that i_cdev was already initialized.
4167 static inline int tracing_get_cpu(struct inode *inode)
4169 if (inode->i_cdev) /* See trace_create_cpu_file() */
4170 return (long)inode->i_cdev - 1;
4171 return RING_BUFFER_ALL_CPUS;
4174 static const struct seq_operations tracer_seq_ops = {
4181 static struct trace_iterator *
4182 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4184 struct trace_array *tr = inode->i_private;
4185 struct trace_iterator *iter;
4188 if (tracing_disabled)
4189 return ERR_PTR(-ENODEV);
4191 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4193 return ERR_PTR(-ENOMEM);
4195 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4197 if (!iter->buffer_iter)
4201 * We make a copy of the current tracer to avoid concurrent
4202 * changes on it while we are reading.
4204 mutex_lock(&trace_types_lock);
4205 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4209 *iter->trace = *tr->current_trace;
4211 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4216 #ifdef CONFIG_TRACER_MAX_TRACE
4217 /* Currently only the top directory has a snapshot */
4218 if (tr->current_trace->print_max || snapshot)
4219 iter->array_buffer = &tr->max_buffer;
4222 iter->array_buffer = &tr->array_buffer;
4223 iter->snapshot = snapshot;
4225 iter->cpu_file = tracing_get_cpu(inode);
4226 mutex_init(&iter->mutex);
4228 /* Notify the tracer early; before we stop tracing. */
4229 if (iter->trace->open)
4230 iter->trace->open(iter);
4232 /* Annotate start of buffers if we had overruns */
4233 if (ring_buffer_overruns(iter->array_buffer->buffer))
4234 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4236 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4237 if (trace_clocks[tr->clock_id].in_ns)
4238 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4240 /* stop the trace while dumping if we are not opening "snapshot" */
4241 if (!iter->snapshot)
4242 tracing_stop_tr(tr);
4244 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4245 for_each_tracing_cpu(cpu) {
4246 iter->buffer_iter[cpu] =
4247 ring_buffer_read_prepare(iter->array_buffer->buffer,
4250 ring_buffer_read_prepare_sync();
4251 for_each_tracing_cpu(cpu) {
4252 ring_buffer_read_start(iter->buffer_iter[cpu]);
4253 tracing_iter_reset(iter, cpu);
4256 cpu = iter->cpu_file;
4257 iter->buffer_iter[cpu] =
4258 ring_buffer_read_prepare(iter->array_buffer->buffer,
4260 ring_buffer_read_prepare_sync();
4261 ring_buffer_read_start(iter->buffer_iter[cpu]);
4262 tracing_iter_reset(iter, cpu);
4265 mutex_unlock(&trace_types_lock);
4270 mutex_unlock(&trace_types_lock);
4272 kfree(iter->buffer_iter);
4274 seq_release_private(inode, file);
4275 return ERR_PTR(-ENOMEM);
4278 int tracing_open_generic(struct inode *inode, struct file *filp)
4282 ret = tracing_check_open_get_tr(NULL);
4286 filp->private_data = inode->i_private;
4290 bool tracing_is_disabled(void)
4292 return (tracing_disabled) ? true: false;
4296 * Open and update trace_array ref count.
4297 * Must have the current trace_array passed to it.
4299 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4301 struct trace_array *tr = inode->i_private;
4304 ret = tracing_check_open_get_tr(tr);
4308 filp->private_data = inode->i_private;
4313 static int tracing_release(struct inode *inode, struct file *file)
4315 struct trace_array *tr = inode->i_private;
4316 struct seq_file *m = file->private_data;
4317 struct trace_iterator *iter;
4320 if (!(file->f_mode & FMODE_READ)) {
4321 trace_array_put(tr);
4325 /* Writes do not use seq_file */
4327 mutex_lock(&trace_types_lock);
4329 for_each_tracing_cpu(cpu) {
4330 if (iter->buffer_iter[cpu])
4331 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4334 if (iter->trace && iter->trace->close)
4335 iter->trace->close(iter);
4337 if (!iter->snapshot)
4338 /* reenable tracing if it was previously enabled */
4339 tracing_start_tr(tr);
4341 __trace_array_put(tr);
4343 mutex_unlock(&trace_types_lock);
4345 mutex_destroy(&iter->mutex);
4346 free_cpumask_var(iter->started);
4348 kfree(iter->buffer_iter);
4349 seq_release_private(inode, file);
4354 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4356 struct trace_array *tr = inode->i_private;
4358 trace_array_put(tr);
4362 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4364 struct trace_array *tr = inode->i_private;
4366 trace_array_put(tr);
4368 return single_release(inode, file);
4371 static int tracing_open(struct inode *inode, struct file *file)
4373 struct trace_array *tr = inode->i_private;
4374 struct trace_iterator *iter;
4377 ret = tracing_check_open_get_tr(tr);
4381 /* If this file was open for write, then erase contents */
4382 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4383 int cpu = tracing_get_cpu(inode);
4384 struct array_buffer *trace_buf = &tr->array_buffer;
4386 #ifdef CONFIG_TRACER_MAX_TRACE
4387 if (tr->current_trace->print_max)
4388 trace_buf = &tr->max_buffer;
4391 if (cpu == RING_BUFFER_ALL_CPUS)
4392 tracing_reset_online_cpus(trace_buf);
4394 tracing_reset_cpu(trace_buf, cpu);
4397 if (file->f_mode & FMODE_READ) {
4398 iter = __tracing_open(inode, file, false);
4400 ret = PTR_ERR(iter);
4401 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4402 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4406 trace_array_put(tr);
4412 * Some tracers are not suitable for instance buffers.
4413 * A tracer is always available for the global array (toplevel)
4414 * or if it explicitly states that it is.
4417 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4419 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4422 /* Find the next tracer that this trace array may use */
4423 static struct tracer *
4424 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4426 while (t && !trace_ok_for_array(t, tr))
4433 t_next(struct seq_file *m, void *v, loff_t *pos)
4435 struct trace_array *tr = m->private;
4436 struct tracer *t = v;
4441 t = get_tracer_for_array(tr, t->next);
4446 static void *t_start(struct seq_file *m, loff_t *pos)
4448 struct trace_array *tr = m->private;
4452 mutex_lock(&trace_types_lock);
4454 t = get_tracer_for_array(tr, trace_types);
4455 for (; t && l < *pos; t = t_next(m, t, &l))
4461 static void t_stop(struct seq_file *m, void *p)
4463 mutex_unlock(&trace_types_lock);
4466 static int t_show(struct seq_file *m, void *v)
4468 struct tracer *t = v;
4473 seq_puts(m, t->name);
4482 static const struct seq_operations show_traces_seq_ops = {
4489 static int show_traces_open(struct inode *inode, struct file *file)
4491 struct trace_array *tr = inode->i_private;
4495 ret = tracing_check_open_get_tr(tr);
4499 ret = seq_open(file, &show_traces_seq_ops);
4501 trace_array_put(tr);
4505 m = file->private_data;
4511 static int show_traces_release(struct inode *inode, struct file *file)
4513 struct trace_array *tr = inode->i_private;
4515 trace_array_put(tr);
4516 return seq_release(inode, file);
4520 tracing_write_stub(struct file *filp, const char __user *ubuf,
4521 size_t count, loff_t *ppos)
4526 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4530 if (file->f_mode & FMODE_READ)
4531 ret = seq_lseek(file, offset, whence);
4533 file->f_pos = ret = 0;
4538 static const struct file_operations tracing_fops = {
4539 .open = tracing_open,
4541 .write = tracing_write_stub,
4542 .llseek = tracing_lseek,
4543 .release = tracing_release,
4546 static const struct file_operations show_traces_fops = {
4547 .open = show_traces_open,
4549 .llseek = seq_lseek,
4550 .release = show_traces_release,
4554 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4555 size_t count, loff_t *ppos)
4557 struct trace_array *tr = file_inode(filp)->i_private;
4561 len = snprintf(NULL, 0, "%*pb\n",
4562 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4563 mask_str = kmalloc(len, GFP_KERNEL);
4567 len = snprintf(mask_str, len, "%*pb\n",
4568 cpumask_pr_args(tr->tracing_cpumask));
4573 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4581 int tracing_set_cpumask(struct trace_array *tr,
4582 cpumask_var_t tracing_cpumask_new)
4589 local_irq_disable();
4590 arch_spin_lock(&tr->max_lock);
4591 for_each_tracing_cpu(cpu) {
4593 * Increase/decrease the disabled counter if we are
4594 * about to flip a bit in the cpumask:
4596 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4597 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4598 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4599 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4601 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4602 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4603 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4604 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4607 arch_spin_unlock(&tr->max_lock);
4610 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4616 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4617 size_t count, loff_t *ppos)
4619 struct trace_array *tr = file_inode(filp)->i_private;
4620 cpumask_var_t tracing_cpumask_new;
4623 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4626 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4630 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4634 free_cpumask_var(tracing_cpumask_new);
4639 free_cpumask_var(tracing_cpumask_new);
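/*
 * Example (illustrative): limit tracing to CPUs 0 and 1 with
 *
 *	# echo 3 > tracing_cpumask
 *
 * The value written is a hex cpumask, as parsed by cpumask_parse_user()
 * above.
 */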
4644 static const struct file_operations tracing_cpumask_fops = {
4645 .open = tracing_open_generic_tr,
4646 .read = tracing_cpumask_read,
4647 .write = tracing_cpumask_write,
4648 .release = tracing_release_generic_tr,
4649 .llseek = generic_file_llseek,
4652 static int tracing_trace_options_show(struct seq_file *m, void *v)
4654 struct tracer_opt *trace_opts;
4655 struct trace_array *tr = m->private;
4659 mutex_lock(&trace_types_lock);
4660 tracer_flags = tr->current_trace->flags->val;
4661 trace_opts = tr->current_trace->flags->opts;
4663 for (i = 0; trace_options[i]; i++) {
4664 if (tr->trace_flags & (1 << i))
4665 seq_printf(m, "%s\n", trace_options[i]);
4667 seq_printf(m, "no%s\n", trace_options[i]);
4670 for (i = 0; trace_opts[i].name; i++) {
4671 if (tracer_flags & trace_opts[i].bit)
4672 seq_printf(m, "%s\n", trace_opts[i].name);
4674 seq_printf(m, "no%s\n", trace_opts[i].name);
4676 mutex_unlock(&trace_types_lock);
4681 static int __set_tracer_option(struct trace_array *tr,
4682 struct tracer_flags *tracer_flags,
4683 struct tracer_opt *opts, int neg)
4685 struct tracer *trace = tracer_flags->trace;
4688 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4693 tracer_flags->val &= ~opts->bit;
4695 tracer_flags->val |= opts->bit;
4699 /* Try to assign a tracer specific option */
4700 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4702 struct tracer *trace = tr->current_trace;
4703 struct tracer_flags *tracer_flags = trace->flags;
4704 struct tracer_opt *opts = NULL;
4707 for (i = 0; tracer_flags->opts[i].name; i++) {
4708 opts = &tracer_flags->opts[i];
4710 if (strcmp(cmp, opts->name) == 0)
4711 return __set_tracer_option(tr, trace->flags, opts, neg);
4717 /* Some tracers require overwrite to stay enabled */
4718 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4720 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4726 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4728 if ((mask == TRACE_ITER_RECORD_TGID) ||
4729 (mask == TRACE_ITER_RECORD_CMD))
4730 lockdep_assert_held(&event_mutex);
4732 /* do nothing if flag is already set */
4733 if (!!(tr->trace_flags & mask) == !!enabled)
4736 /* Give the tracer a chance to approve the change */
4737 if (tr->current_trace->flag_changed)
4738 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4742 tr->trace_flags |= mask;
4744 tr->trace_flags &= ~mask;
4746 if (mask == TRACE_ITER_RECORD_CMD)
4747 trace_event_enable_cmd_record(enabled);
4749 if (mask == TRACE_ITER_RECORD_TGID) {
4751 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4755 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4759 trace_event_enable_tgid_record(enabled);
4762 if (mask == TRACE_ITER_EVENT_FORK)
4763 trace_event_follow_fork(tr, enabled);
4765 if (mask == TRACE_ITER_FUNC_FORK)
4766 ftrace_pid_follow_fork(tr, enabled);
4768 if (mask == TRACE_ITER_OVERWRITE) {
4769 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4770 #ifdef CONFIG_TRACER_MAX_TRACE
4771 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4775 if (mask == TRACE_ITER_PRINTK) {
4776 trace_printk_start_stop_comm(enabled);
4777 trace_printk_control(enabled);
4783 int trace_set_options(struct trace_array *tr, char *option)
4788 size_t orig_len = strlen(option);
4791 cmp = strstrip(option);
4793 len = str_has_prefix(cmp, "no");
4799 mutex_lock(&event_mutex);
4800 mutex_lock(&trace_types_lock);
4802 ret = match_string(trace_options, -1, cmp);
4803 /* If no option could be set, test the specific tracer options */
4805 ret = set_tracer_option(tr, cmp, neg);
4807 ret = set_tracer_flag(tr, 1 << ret, !neg);
4809 mutex_unlock(&trace_types_lock);
4810 mutex_unlock(&event_mutex);
4813 * If the first trailing whitespace is replaced with '\0' by strstrip,
4814 * turn it back into a space.
4816 if (orig_len > strlen(option))
4817 option[strlen(option)] = ' ';
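/*
 * Apply the comma separated option list saved from the "trace_options="
 * boot parameter, e.g. trace_options=sym-offset,stacktrace.  The option
 * names are the same ones accepted by the trace_options file.
 */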
4822 static void __init apply_trace_boot_options(void)
4824 char *buf = trace_boot_options_buf;
4828 option = strsep(&buf, ",");
4834 trace_set_options(&global_trace, option);
4836 /* Put back the comma to allow this to be called again */
4843 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4844 size_t cnt, loff_t *ppos)
4846 struct seq_file *m = filp->private_data;
4847 struct trace_array *tr = m->private;
4851 if (cnt >= sizeof(buf))
4854 if (copy_from_user(buf, ubuf, cnt))
4859 ret = trace_set_options(tr, buf);
4868 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4870 struct trace_array *tr = inode->i_private;
4873 ret = tracing_check_open_get_tr(tr);
4877 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4879 trace_array_put(tr);
4884 static const struct file_operations tracing_iter_fops = {
4885 .open = tracing_trace_options_open,
4887 .llseek = seq_lseek,
4888 .release = tracing_single_release_tr,
4889 .write = tracing_trace_options_write,
4892 static const char readme_msg[] =
4893 "tracing mini-HOWTO:\n\n"
4894 "# echo 0 > tracing_on : quick way to disable tracing\n"
4895 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4896 " Important files:\n"
4897 " trace\t\t\t- The static contents of the buffer\n"
4898 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4899 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4900 " current_tracer\t- function and latency tracers\n"
4901 " available_tracers\t- list of configured tracers for current_tracer\n"
4902 " error_log\t- error log for failed commands (that support it)\n"
4903 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4904 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4905 " trace_clock\t\t- change the clock used to order events\n"
4906 " local: Per cpu clock but may not be synced across CPUs\n"
4907 " global: Synced across CPUs but slows tracing down.\n"
4908 " counter: Not a clock, but just an increment\n"
4909 " uptime: Jiffy counter from time of boot\n"
4910 " perf: Same clock that perf events use\n"
4911 #ifdef CONFIG_X86_64
4912 " x86-tsc: TSC cycle counter\n"
4914 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4915 " delta: Delta difference against a buffer-wide timestamp\n"
4916 " absolute: Absolute (standalone) timestamp\n"
4917 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4918 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4919 " tracing_cpumask\t- Limit which CPUs to trace\n"
4920 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4921 "\t\t\t Remove sub-buffer with rmdir\n"
4922 " trace_options\t\t- Set format or modify how tracing happens\n"
4923 "\t\t\t Disable an option by prefixing 'no' to the\n"
4924 "\t\t\t option name\n"
4925 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4926 #ifdef CONFIG_DYNAMIC_FTRACE
4927 "\n available_filter_functions - list of functions that can be filtered on\n"
4928 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4929 "\t\t\t functions\n"
4930 "\t accepts: func_full_name or glob-matching-pattern\n"
4931 "\t modules: Can select a group via module\n"
4932 "\t Format: :mod:<module-name>\n"
4933 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4934 "\t triggers: a command to perform when function is hit\n"
4935 "\t Format: <function>:<trigger>[:count]\n"
4936 "\t trigger: traceon, traceoff\n"
4937 "\t\t enable_event:<system>:<event>\n"
4938 "\t\t disable_event:<system>:<event>\n"
4939 #ifdef CONFIG_STACKTRACE
4942 #ifdef CONFIG_TRACER_SNAPSHOT
4947 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4948 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4949 "\t The first one will disable tracing every time do_fault is hit\n"
4950 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4951 "\t The first time do trap is hit and it disables tracing, the\n"
4952 "\t counter will decrement to 2. If tracing is already disabled,\n"
4953 "\t the counter will not decrement. It only decrements when the\n"
4954 "\t trigger did work\n"
4955 "\t To remove trigger without count:\n"
4956 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4957 "\t To remove trigger with a count:\n"
4958 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4959 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4960 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4961 "\t modules: Can select a group via module command :mod:\n"
4962 "\t Does not accept triggers\n"
4963 #endif /* CONFIG_DYNAMIC_FTRACE */
4964 #ifdef CONFIG_FUNCTION_TRACER
4965 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4968 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4969 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4970 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4971 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4973 #ifdef CONFIG_TRACER_SNAPSHOT
4974 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4975 "\t\t\t snapshot buffer. Read the contents for more\n"
4976 "\t\t\t information\n"
4978 #ifdef CONFIG_STACK_TRACER
4979 " stack_trace\t\t- Shows the max stack trace when active\n"
4980 " stack_max_size\t- Shows current max stack size that was traced\n"
4981 "\t\t\t Write into this file to reset the max size (trigger a\n"
4982 "\t\t\t new trace)\n"
4983 #ifdef CONFIG_DYNAMIC_FTRACE
4984 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4987 #endif /* CONFIG_STACK_TRACER */
4988 #ifdef CONFIG_DYNAMIC_EVENTS
4989 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
4990 "\t\t\t Write into this file to define/undefine new trace events.\n"
4992 #ifdef CONFIG_KPROBE_EVENTS
4993 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
4994 "\t\t\t Write into this file to define/undefine new trace events.\n"
4996 #ifdef CONFIG_UPROBE_EVENTS
4997 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
4998 "\t\t\t Write into this file to define/undefine new trace events.\n"
5000 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5001 "\t accepts: event-definitions (one definition per line)\n"
5002 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5003 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5004 #ifdef CONFIG_HIST_TRIGGERS
5005 "\t s:[synthetic/]<event> <field> [<field>]\n"
5007 "\t -:[<group>/]<event>\n"
5008 #ifdef CONFIG_KPROBE_EVENTS
5009 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5010 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5012 #ifdef CONFIG_UPROBE_EVENTS
5013 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
5015 "\t args: <name>=fetcharg[:type]\n"
5016 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5017 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5018 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5020 "\t $stack<index>, $stack, $retval, $comm,\n"
5022 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5023 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5024 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5025 "\t <type>\\[<array-size>\\]\n"
5026 #ifdef CONFIG_HIST_TRIGGERS
5027 "\t field: <stype> <name>;\n"
5028 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5029 "\t [unsigned] char/int/long\n"
5032 " events/\t\t- Directory containing all trace event subsystems:\n"
5033 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5034 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5035 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5037 " filter\t\t- If set, only events passing filter are traced\n"
5038 " events/<system>/<event>/\t- Directory containing control files for\n"
5040 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5041 " filter\t\t- If set, only events passing filter are traced\n"
5042 " trigger\t\t- If set, a command to perform when event is hit\n"
5043 "\t Format: <trigger>[:count][if <filter>]\n"
5044 "\t trigger: traceon, traceoff\n"
5045 "\t enable_event:<system>:<event>\n"
5046 "\t disable_event:<system>:<event>\n"
5047 #ifdef CONFIG_HIST_TRIGGERS
5048 "\t enable_hist:<system>:<event>\n"
5049 "\t disable_hist:<system>:<event>\n"
5051 #ifdef CONFIG_STACKTRACE
5054 #ifdef CONFIG_TRACER_SNAPSHOT
5057 #ifdef CONFIG_HIST_TRIGGERS
5058 "\t\t hist (see below)\n"
5060 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5061 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5062 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5063 "\t events/block/block_unplug/trigger\n"
5064 "\t The first disables tracing every time block_unplug is hit.\n"
5065 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5066 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5067 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5068 "\t Like function triggers, the counter is only decremented if it\n"
5069 "\t enabled or disabled tracing.\n"
5070 "\t To remove a trigger without a count:\n"
5071 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5072 "\t To remove a trigger with a count:\n"
5073 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5074 "\t Filters can be ignored when removing a trigger.\n"
5075 #ifdef CONFIG_HIST_TRIGGERS
5076 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5077 "\t Format: hist:keys=<field1[,field2,...]>\n"
5078 "\t [:values=<field1[,field2,...]>]\n"
5079 "\t [:sort=<field1[,field2,...]>]\n"
5080 "\t [:size=#entries]\n"
5081 "\t [:pause][:continue][:clear]\n"
5082 "\t [:name=histname1]\n"
5083 "\t [:<handler>.<action>]\n"
5084 "\t [if <filter>]\n\n"
5085 "\t When a matching event is hit, an entry is added to a hash\n"
5086 "\t table using the key(s) and value(s) named, and the value of a\n"
5087 "\t sum called 'hitcount' is incremented. Keys and values\n"
5088 "\t correspond to fields in the event's format description. Keys\n"
5089 "\t can be any field, or the special string 'stacktrace'.\n"
5090 "\t Compound keys consisting of up to two fields can be specified\n"
5091 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5092 "\t fields. Sort keys consisting of up to two fields can be\n"
5093 "\t specified using the 'sort' keyword. The sort direction can\n"
5094 "\t be modified by appending '.descending' or '.ascending' to a\n"
5095 "\t sort field. The 'size' parameter can be used to specify more\n"
5096 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5097 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5098 "\t its histogram data will be shared with other triggers of the\n"
5099 "\t same name, and trigger hits will update this common data.\n\n"
5100 "\t Reading the 'hist' file for the event will dump the hash\n"
5101 "\t table in its entirety to stdout. If there are multiple hist\n"
5102 "\t triggers attached to an event, there will be a table for each\n"
5103 "\t trigger in the output. The table displayed for a named\n"
5104 "\t trigger will be the same as any other instance having the\n"
5105 "\t same name. The default format used to display a given field\n"
5106 "\t can be modified by appending any of the following modifiers\n"
5107 "\t to the field name, as applicable:\n\n"
5108 "\t .hex display a number as a hex value\n"
5109 "\t .sym display an address as a symbol\n"
5110 "\t .sym-offset display an address as a symbol and offset\n"
5111 "\t .execname display a common_pid as a program name\n"
5112 "\t .syscall display a syscall id as a syscall name\n"
5113 "\t .log2 display log2 value rather than raw number\n"
5114 "\t .usecs display a common_timestamp in microseconds\n\n"
5115 "\t The 'pause' parameter can be used to pause an existing hist\n"
5116 "\t trigger or to start a hist trigger but not log any events\n"
5117 "\t until told to do so. 'continue' can be used to start or\n"
5118 "\t restart a paused hist trigger.\n\n"
5119 "\t The 'clear' parameter will clear the contents of a running\n"
5120 "\t hist trigger and leave its current paused/active state\n"
5122 "\t The enable_hist and disable_hist triggers can be used to\n"
5123 "\t have one event conditionally start and stop another event's\n"
5124 "\t already-attached hist trigger. The syntax is analogous to\n"
5125 "\t the enable_event and disable_event triggers.\n\n"
5126 "\t Hist trigger handlers and actions are executed whenever a\n"
5127 "\t a histogram entry is added or updated. They take the form:\n\n"
5128 "\t <handler>.<action>\n\n"
5129 "\t The available handlers are:\n\n"
5130 "\t onmatch(matching.event) - invoke on addition or update\n"
5131 "\t onmax(var) - invoke if var exceeds current max\n"
5132 "\t onchange(var) - invoke action if var changes\n\n"
5133 "\t The available actions are:\n\n"
5134 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5135 "\t save(field,...) - save current event fields\n"
5136 #ifdef CONFIG_TRACER_SNAPSHOT
5137 "\t snapshot() - snapshot the trace buffer\n"
5143 tracing_readme_read(struct file *filp, char __user *ubuf,
5144 size_t cnt, loff_t *ppos)
5146 return simple_read_from_buffer(ubuf, cnt, ppos,
5147 readme_msg, strlen(readme_msg));
5150 static const struct file_operations tracing_readme_fops = {
5151 .open = tracing_open_generic,
5152 .read = tracing_readme_read,
5153 .llseek = generic_file_llseek,
5156 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5160 if (*pos || m->count)
5165 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5166 if (trace_find_tgid(*ptr))
5173 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5183 v = saved_tgids_next(m, v, &l);
5191 static void saved_tgids_stop(struct seq_file *m, void *v)
5195 static int saved_tgids_show(struct seq_file *m, void *v)
5197 int pid = (int *)v - tgid_map;
5199 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5203 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5204 .start = saved_tgids_start,
5205 .stop = saved_tgids_stop,
5206 .next = saved_tgids_next,
5207 .show = saved_tgids_show,
5210 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5214 ret = tracing_check_open_get_tr(NULL);
5218 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5222 static const struct file_operations tracing_saved_tgids_fops = {
5223 .open = tracing_saved_tgids_open,
5225 .llseek = seq_lseek,
5226 .release = seq_release,
5229 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5231 unsigned int *ptr = v;
5233 if (*pos || m->count)
5238 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5240 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5249 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5255 arch_spin_lock(&trace_cmdline_lock);
5257 v = &savedcmd->map_cmdline_to_pid[0];
5259 v = saved_cmdlines_next(m, v, &l);
5267 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5269 arch_spin_unlock(&trace_cmdline_lock);
5273 static int saved_cmdlines_show(struct seq_file *m, void *v)
5275 char buf[TASK_COMM_LEN];
5276 unsigned int *pid = v;
5278 __trace_find_cmdline(*pid, buf);
5279 seq_printf(m, "%d %s\n", *pid, buf);
5283 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5284 .start = saved_cmdlines_start,
5285 .next = saved_cmdlines_next,
5286 .stop = saved_cmdlines_stop,
5287 .show = saved_cmdlines_show,
5290 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5294 ret = tracing_check_open_get_tr(NULL);
5298 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5301 static const struct file_operations tracing_saved_cmdlines_fops = {
5302 .open = tracing_saved_cmdlines_open,
5304 .llseek = seq_lseek,
5305 .release = seq_release,
5309 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5310 size_t cnt, loff_t *ppos)
5315 arch_spin_lock(&trace_cmdline_lock);
5316 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5317 arch_spin_unlock(&trace_cmdline_lock);
5319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5322 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5324 kfree(s->saved_cmdlines);
5325 kfree(s->map_cmdline_to_pid);
5329 static int tracing_resize_saved_cmdlines(unsigned int val)
5331 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5333 s = kmalloc(sizeof(*s), GFP_KERNEL);
5337 if (allocate_cmdlines_buffer(val, s) < 0) {
5342 arch_spin_lock(&trace_cmdline_lock);
5343 savedcmd_temp = savedcmd;
5345 arch_spin_unlock(&trace_cmdline_lock);
5346 free_saved_cmdlines_buffer(savedcmd_temp);
5352 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5353 size_t cnt, loff_t *ppos)
5358 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5362 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5363 if (!val || val > PID_MAX_DEFAULT)
5366 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5375 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5376 .open = tracing_open_generic,
5377 .read = tracing_saved_cmdlines_size_read,
5378 .write = tracing_saved_cmdlines_size_write,
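/*
 * Illustrative usage (sketch): writing a number to saved_cmdlines_size,
 * e.g.
 *
 *	echo 1024 > saved_cmdlines_size
 *
 * lands in tracing_saved_cmdlines_size_write(), which has
 * tracing_resize_saved_cmdlines() allocate a new saved_cmdlines_buffer of
 * that many entries, publish it under trace_cmdline_lock and free the old
 * one.
 */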
5381 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5382 static union trace_eval_map_item *
5383 update_eval_map(union trace_eval_map_item *ptr)
5385 if (!ptr->map.eval_string) {
5386 if (ptr->tail.next) {
5387 ptr = ptr->tail.next;
5388 /* Set ptr to the next real item (skip head) */
5396 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5398 union trace_eval_map_item *ptr = v;
5401 * Paranoid! If ptr points to end, we don't want to increment past it.
5402 * This really should never happen.
5405 ptr = update_eval_map(ptr);
5406 if (WARN_ON_ONCE(!ptr))
5410 ptr = update_eval_map(ptr);
5415 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5417 union trace_eval_map_item *v;
5420 mutex_lock(&trace_eval_mutex);
5422 v = trace_eval_maps;
5426 while (v && l < *pos) {
5427 v = eval_map_next(m, v, &l);
5433 static void eval_map_stop(struct seq_file *m, void *v)
5435 mutex_unlock(&trace_eval_mutex);
5438 static int eval_map_show(struct seq_file *m, void *v)
5440 union trace_eval_map_item *ptr = v;
5442 seq_printf(m, "%s %ld (%s)\n",
5443 ptr->map.eval_string, ptr->map.eval_value,
5449 static const struct seq_operations tracing_eval_map_seq_ops = {
5450 .start = eval_map_start,
5451 .next = eval_map_next,
5452 .stop = eval_map_stop,
5453 .show = eval_map_show,
5456 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5460 ret = tracing_check_open_get_tr(NULL);
5464 return seq_open(filp, &tracing_eval_map_seq_ops);
5467 static const struct file_operations tracing_eval_map_fops = {
5468 .open = tracing_eval_map_open,
5470 .llseek = seq_lseek,
5471 .release = seq_release,
5474 static inline union trace_eval_map_item *
5475 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5477 /* Return tail of array given the head */
5478 return ptr + ptr->head.length + 1;
5482 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5485 struct trace_eval_map **stop;
5486 struct trace_eval_map **map;
5487 union trace_eval_map_item *map_array;
5488 union trace_eval_map_item *ptr;
5493 * The trace_eval_maps contains the map plus a head and tail item,
5494 * where the head holds the module and length of array, and the
5495 * tail holds a pointer to the next list.
5497 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5499 pr_warn("Unable to allocate trace eval mapping\n");
5503 mutex_lock(&trace_eval_mutex);
5505 if (!trace_eval_maps)
5506 trace_eval_maps = map_array;
5508 ptr = trace_eval_maps;
5510 ptr = trace_eval_jmp_to_tail(ptr);
5511 if (!ptr->tail.next)
5513 ptr = ptr->tail.next;
5516 ptr->tail.next = map_array;
5518 map_array->head.mod = mod;
5519 map_array->head.length = len;
5522 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5523 map_array->map = **map;
5526 memset(map_array, 0, sizeof(*map_array));
5528 mutex_unlock(&trace_eval_mutex);
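/*
 * Layout sketch of one map_array chunk built above (len + 2 items):
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * The final (tail) item is zeroed by the memset() above until a later
 * module chains onto it; trace_eval_jmp_to_tail() uses head.length to
 * jump straight from the head to that tail when walking the list.
 */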
5531 static void trace_create_eval_file(struct dentry *d_tracer)
5533 trace_create_file("eval_map", 0444, d_tracer,
5534 NULL, &tracing_eval_map_fops);
5537 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5538 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5539 static inline void trace_insert_eval_map_file(struct module *mod,
5540 struct trace_eval_map **start, int len) { }
5541 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5543 static void trace_insert_eval_map(struct module *mod,
5544 struct trace_eval_map **start, int len)
5546 struct trace_eval_map **map;
5553 trace_event_eval_update(map, len);
5555 trace_insert_eval_map_file(mod, start, len);
5559 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5560 size_t cnt, loff_t *ppos)
5562 struct trace_array *tr = filp->private_data;
5563 char buf[MAX_TRACER_SIZE+2];
5566 mutex_lock(&trace_types_lock);
5567 r = sprintf(buf, "%s\n", tr->current_trace->name);
5568 mutex_unlock(&trace_types_lock);
5570 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5573 int tracer_init(struct tracer *t, struct trace_array *tr)
5575 tracing_reset_online_cpus(&tr->array_buffer);
5579 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5583 for_each_tracing_cpu(cpu)
5584 per_cpu_ptr(buf->data, cpu)->entries = val;
5587 #ifdef CONFIG_TRACER_MAX_TRACE
5588 /* resize @tr's buffer to the size of @size_tr's entries */
5589 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5590 struct array_buffer *size_buf, int cpu_id)
5594 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5595 for_each_tracing_cpu(cpu) {
5596 ret = ring_buffer_resize(trace_buf->buffer,
5597 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5600 per_cpu_ptr(trace_buf->data, cpu)->entries =
5601 per_cpu_ptr(size_buf->data, cpu)->entries;
5604 ret = ring_buffer_resize(trace_buf->buffer,
5605 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5607 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5608 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5613 #endif /* CONFIG_TRACER_MAX_TRACE */
5615 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5616 unsigned long size, int cpu)
5621 * If kernel or user changes the size of the ring buffer
5622 * we use the size that was given, and we can forget about
5623 * expanding it later.
5625 ring_buffer_expanded = true;
5627 /* May be called before buffers are initialized */
5628 if (!tr->array_buffer.buffer)
5631 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5635 #ifdef CONFIG_TRACER_MAX_TRACE
5636 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5637 !tr->current_trace->use_max_tr)
5640 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5642 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5643 &tr->array_buffer, cpu);
5646 * AARGH! We are left with different
5647 * size max buffer!!!!
5648 * The max buffer is our "snapshot" buffer.
5649 * When a tracer needs a snapshot (one of the
5650 * latency tracers), it swaps the max buffer
5651 * with the saved snapshot. We succeeded in updating
5652 * the size of the main buffer, but failed to
5653 * update the size of the max buffer. But when we tried
5654 * to reset the main buffer to the original size, we
5655 * failed there too. This is very unlikely to
5656 * happen, but if it does, warn and kill all
5660 tracing_disabled = 1;
5665 if (cpu == RING_BUFFER_ALL_CPUS)
5666 set_buffer_entries(&tr->max_buffer, size);
5668 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5671 #endif /* CONFIG_TRACER_MAX_TRACE */
5673 if (cpu == RING_BUFFER_ALL_CPUS)
5674 set_buffer_entries(&tr->array_buffer, size);
5676 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5681 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5682 unsigned long size, int cpu_id)
5686 mutex_lock(&trace_types_lock);
5688 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5689 /* make sure, this cpu is enabled in the mask */
5690 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5696 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5701 mutex_unlock(&trace_types_lock);
5708 * tracing_update_buffers - used by tracing facility to expand ring buffers
5710 * To save memory when tracing is never used on a system that has it
5711 * configured in, the ring buffers are set to a minimum size. But once
5712 * a user starts to use the tracing facility, they need to grow
5713 * to their default size.
5715 * This function is to be called when a tracer is about to be used.
5717 int tracing_update_buffers(void)
5721 mutex_lock(&trace_types_lock);
5722 if (!ring_buffer_expanded)
5723 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5724 RING_BUFFER_ALL_CPUS);
5725 mutex_unlock(&trace_types_lock);
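/*
 * Illustrative calling pattern (a sketch of how users of this helper are
 * expected to behave, not copied from a specific caller):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... only then enable the tracer or event, with buffers at full size ...
 */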
5730 struct trace_option_dentry;
5733 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5736 * Used to clear out the tracer before deletion of an instance.
5737 * Must have trace_types_lock held.
5739 static void tracing_set_nop(struct trace_array *tr)
5741 if (tr->current_trace == &nop_trace)
5744 tr->current_trace->enabled--;
5746 if (tr->current_trace->reset)
5747 tr->current_trace->reset(tr);
5749 tr->current_trace = &nop_trace;
5752 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5754 /* Only enable if the directory has been created already. */
5758 create_trace_option_files(tr, t);
5761 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5764 #ifdef CONFIG_TRACER_MAX_TRACE
5769 mutex_lock(&trace_types_lock);
5771 if (!ring_buffer_expanded) {
5772 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5773 RING_BUFFER_ALL_CPUS);
5779 for (t = trace_types; t; t = t->next) {
5780 if (strcmp(t->name, buf) == 0)
5787 if (t == tr->current_trace)
5790 #ifdef CONFIG_TRACER_SNAPSHOT
5791 if (t->use_max_tr) {
5792 arch_spin_lock(&tr->max_lock);
5793 if (tr->cond_snapshot)
5795 arch_spin_unlock(&tr->max_lock);
5800 /* Some tracers won't work on kernel command line */
5801 if (system_state < SYSTEM_RUNNING && t->noboot) {
5802 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5807 /* Some tracers are only allowed for the top level buffer */
5808 if (!trace_ok_for_array(t, tr)) {
5813 /* If trace pipe files are being read, we can't change the tracer */
5814 if (tr->current_trace->ref) {
5819 trace_branch_disable();
5821 tr->current_trace->enabled--;
5823 if (tr->current_trace->reset)
5824 tr->current_trace->reset(tr);
5826 /* Current trace needs to be nop_trace before synchronize_rcu */
5827 tr->current_trace = &nop_trace;
5829 #ifdef CONFIG_TRACER_MAX_TRACE
5830 had_max_tr = tr->allocated_snapshot;
5832 if (had_max_tr && !t->use_max_tr) {
5834 * We need to make sure that the update_max_tr sees that
5835 * current_trace changed to nop_trace to keep it from
5836 * swapping the buffers after we resize it.
5837 * The update_max_tr is called from interrupts disabled
5838 * so a synchronize_rcu() is sufficient.
5845 #ifdef CONFIG_TRACER_MAX_TRACE
5846 if (t->use_max_tr && !had_max_tr) {
5847 ret = tracing_alloc_snapshot_instance(tr);
5854 ret = tracer_init(t, tr);
5859 tr->current_trace = t;
5860 tr->current_trace->enabled++;
5861 trace_branch_enable(tr);
5863 mutex_unlock(&trace_types_lock);
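/*
 * Illustrative usage (sketch):
 *
 *	echo function_graph > current_tracer
 *
 * arrives via tracing_set_trace_write() below and resolves the name
 * against the registered trace_types list; the old tracer is reset,
 * nop_trace is installed while the buffers are adjusted, and
 * tracer_init() finally switches to the new tracer.
 */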
5869 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5870 size_t cnt, loff_t *ppos)
5872 struct trace_array *tr = filp->private_data;
5873 char buf[MAX_TRACER_SIZE+1];
5880 if (cnt > MAX_TRACER_SIZE)
5881 cnt = MAX_TRACER_SIZE;
5883 if (copy_from_user(buf, ubuf, cnt))
5888 /* strip ending whitespace. */
5889 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5892 err = tracing_set_tracer(tr, buf);
5902 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5903 size_t cnt, loff_t *ppos)
5908 r = snprintf(buf, sizeof(buf), "%ld\n",
5909 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5910 if (r > sizeof(buf))
5912 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5916 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5917 size_t cnt, loff_t *ppos)
5922 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5932 tracing_thresh_read(struct file *filp, char __user *ubuf,
5933 size_t cnt, loff_t *ppos)
5935 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5939 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5940 size_t cnt, loff_t *ppos)
5942 struct trace_array *tr = filp->private_data;
5945 mutex_lock(&trace_types_lock);
5946 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5950 if (tr->current_trace->update_thresh) {
5951 ret = tr->current_trace->update_thresh(tr);
5958 mutex_unlock(&trace_types_lock);
5963 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5966 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5967 size_t cnt, loff_t *ppos)
5969 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5973 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5974 size_t cnt, loff_t *ppos)
5976 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5981 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5983 struct trace_array *tr = inode->i_private;
5984 struct trace_iterator *iter;
5987 ret = tracing_check_open_get_tr(tr);
5991 mutex_lock(&trace_types_lock);
5993 /* create a buffer to store the information to pass to userspace */
5994 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5997 __trace_array_put(tr);
6001 trace_seq_init(&iter->seq);
6002 iter->trace = tr->current_trace;
6004 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6009 /* trace pipe does not show start of buffer */
6010 cpumask_setall(iter->started);
6012 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6013 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6015 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6016 if (trace_clocks[tr->clock_id].in_ns)
6017 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6020 iter->array_buffer = &tr->array_buffer;
6021 iter->cpu_file = tracing_get_cpu(inode);
6022 mutex_init(&iter->mutex);
6023 filp->private_data = iter;
6025 if (iter->trace->pipe_open)
6026 iter->trace->pipe_open(iter);
6028 nonseekable_open(inode, filp);
6030 tr->current_trace->ref++;
6032 mutex_unlock(&trace_types_lock);
6037 __trace_array_put(tr);
6038 mutex_unlock(&trace_types_lock);
6042 static int tracing_release_pipe(struct inode *inode, struct file *file)
6044 struct trace_iterator *iter = file->private_data;
6045 struct trace_array *tr = inode->i_private;
6047 mutex_lock(&trace_types_lock);
6049 tr->current_trace->ref--;
6051 if (iter->trace->pipe_close)
6052 iter->trace->pipe_close(iter);
6054 mutex_unlock(&trace_types_lock);
6056 free_cpumask_var(iter->started);
6057 mutex_destroy(&iter->mutex);
6060 trace_array_put(tr);
6066 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6068 struct trace_array *tr = iter->tr;
6070 /* Iterators are static, they should be filled or empty */
6071 if (trace_buffer_iter(iter, iter->cpu_file))
6072 return EPOLLIN | EPOLLRDNORM;
6074 if (tr->trace_flags & TRACE_ITER_BLOCK)
6076 * Always select as readable when in blocking mode
6078 return EPOLLIN | EPOLLRDNORM;
6080 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6085 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6087 struct trace_iterator *iter = filp->private_data;
6089 return trace_poll(iter, filp, poll_table);
6092 /* Must be called with iter->mutex held. */
6093 static int tracing_wait_pipe(struct file *filp)
6095 struct trace_iterator *iter = filp->private_data;
6098 while (trace_empty(iter)) {
6100 if ((filp->f_flags & O_NONBLOCK)) {
6105 * We block until we read something and tracing is disabled.
6106 * We still block if tracing is disabled, but we have never
6107 * read anything. This allows a user to cat this file, and
6108 * then enable tracing. But after we have read something,
6109 * we give an EOF when tracing is again disabled.
6111 * iter->pos will be 0 if we haven't read anything.
6113 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6116 mutex_unlock(&iter->mutex);
6118 ret = wait_on_pipe(iter, 0);
6120 mutex_lock(&iter->mutex);
6133 tracing_read_pipe(struct file *filp, char __user *ubuf,
6134 size_t cnt, loff_t *ppos)
6136 struct trace_iterator *iter = filp->private_data;
6140 * Avoid more than one consumer on a single file descriptor.
6141 * This is just a matter of trace coherency, the ring buffer itself
6144 mutex_lock(&iter->mutex);
6146 /* return any leftover data */
6147 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6151 trace_seq_init(&iter->seq);
6153 if (iter->trace->read) {
6154 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6160 sret = tracing_wait_pipe(filp);
6164 /* stop when tracing is finished */
6165 if (trace_empty(iter)) {
6170 if (cnt >= PAGE_SIZE)
6171 cnt = PAGE_SIZE - 1;
6173 /* reset all but tr, trace, and overruns */
6174 memset(&iter->seq, 0,
6175 sizeof(struct trace_iterator) -
6176 offsetof(struct trace_iterator, seq));
6177 cpumask_clear(iter->started);
6178 trace_seq_init(&iter->seq);
6181 trace_event_read_lock();
6182 trace_access_lock(iter->cpu_file);
6183 while (trace_find_next_entry_inc(iter) != NULL) {
6184 enum print_line_t ret;
6185 int save_len = iter->seq.seq.len;
6187 ret = print_trace_line(iter);
6188 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6189 /* don't print partial lines */
6190 iter->seq.seq.len = save_len;
6193 if (ret != TRACE_TYPE_NO_CONSUME)
6194 trace_consume(iter);
6196 if (trace_seq_used(&iter->seq) >= cnt)
6200 * Setting the full flag means we reached the trace_seq buffer
6201 * size and we should leave by partial output condition above.
6202 * One of the trace_seq_* functions is not used properly.
6204 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6207 trace_access_unlock(iter->cpu_file);
6208 trace_event_read_unlock();
6210 /* Now copy what we have to the user */
6211 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6212 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6213 trace_seq_init(&iter->seq);
6216 * If there was nothing to send to user, in spite of consuming trace
6217 * entries, go back to wait for more entries.
6223 mutex_unlock(&iter->mutex);
6228 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6231 __free_page(spd->pages[idx]);
6234 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6235 .confirm = generic_pipe_buf_confirm,
6236 .release = generic_pipe_buf_release,
6237 .steal = generic_pipe_buf_steal,
6238 .get = generic_pipe_buf_get,
6242 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6248 /* Seq buffer is page-sized, exactly what we need. */
6250 save_len = iter->seq.seq.len;
6251 ret = print_trace_line(iter);
6253 if (trace_seq_has_overflowed(&iter->seq)) {
6254 iter->seq.seq.len = save_len;
6259 * This should not be hit, because it should only
6260 * be set if the iter->seq overflowed. But check it
6261 * anyway to be safe.
6263 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6264 iter->seq.seq.len = save_len;
6268 count = trace_seq_used(&iter->seq) - save_len;
6271 iter->seq.seq.len = save_len;
6275 if (ret != TRACE_TYPE_NO_CONSUME)
6276 trace_consume(iter);
6278 if (!trace_find_next_entry_inc(iter)) {
6288 static ssize_t tracing_splice_read_pipe(struct file *filp,
6290 struct pipe_inode_info *pipe,
6294 struct page *pages_def[PIPE_DEF_BUFFERS];
6295 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6296 struct trace_iterator *iter = filp->private_data;
6297 struct splice_pipe_desc spd = {
6299 .partial = partial_def,
6300 .nr_pages = 0, /* This gets updated below. */
6301 .nr_pages_max = PIPE_DEF_BUFFERS,
6302 .ops = &tracing_pipe_buf_ops,
6303 .spd_release = tracing_spd_release_pipe,
6309 if (splice_grow_spd(pipe, &spd))
6312 mutex_lock(&iter->mutex);
6314 if (iter->trace->splice_read) {
6315 ret = iter->trace->splice_read(iter, filp,
6316 ppos, pipe, len, flags);
6321 ret = tracing_wait_pipe(filp);
6325 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6330 trace_event_read_lock();
6331 trace_access_lock(iter->cpu_file);
6333 /* Fill as many pages as possible. */
6334 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6335 spd.pages[i] = alloc_page(GFP_KERNEL);
6339 rem = tracing_fill_pipe_page(rem, iter);
6341 /* Copy the data into the page, so we can start over. */
6342 ret = trace_seq_to_buffer(&iter->seq,
6343 page_address(spd.pages[i]),
6344 trace_seq_used(&iter->seq));
6346 __free_page(spd.pages[i]);
6349 spd.partial[i].offset = 0;
6350 spd.partial[i].len = trace_seq_used(&iter->seq);
6352 trace_seq_init(&iter->seq);
6355 trace_access_unlock(iter->cpu_file);
6356 trace_event_read_unlock();
6357 mutex_unlock(&iter->mutex);
6362 ret = splice_to_pipe(pipe, &spd);
6366 splice_shrink_spd(&spd);
6370 mutex_unlock(&iter->mutex);
6375 tracing_entries_read(struct file *filp, char __user *ubuf,
6376 size_t cnt, loff_t *ppos)
6378 struct inode *inode = file_inode(filp);
6379 struct trace_array *tr = inode->i_private;
6380 int cpu = tracing_get_cpu(inode);
6385 mutex_lock(&trace_types_lock);
6387 if (cpu == RING_BUFFER_ALL_CPUS) {
6388 int cpu, buf_size_same;
6393 /* check if all cpu sizes are same */
6394 for_each_tracing_cpu(cpu) {
6395 /* fill in the size from first enabled cpu */
6397 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6398 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6404 if (buf_size_same) {
6405 if (!ring_buffer_expanded)
6406 r = sprintf(buf, "%lu (expanded: %lu)\n",
6408 trace_buf_size >> 10);
6410 r = sprintf(buf, "%lu\n", size >> 10);
6412 r = sprintf(buf, "X\n");
6414 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6416 mutex_unlock(&trace_types_lock);
6418 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6423 tracing_entries_write(struct file *filp, const char __user *ubuf,
6424 size_t cnt, loff_t *ppos)
6426 struct inode *inode = file_inode(filp);
6427 struct trace_array *tr = inode->i_private;
6431 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6435 /* must have at least 1 entry */
6439 /* value is in KB */
6441 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
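/*
 * Illustrative usage (sketch): the value written is in KiB, e.g.
 *
 *	echo 4096 > buffer_size_kb
 *
 * resizes every per-CPU buffer of this instance to 4 MiB, while the same
 * write to a per_cpu/cpuN/buffer_size_kb file (if present) resizes only
 * that CPU, as selected by tracing_get_cpu(inode) above.
 */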
6451 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6452 size_t cnt, loff_t *ppos)
6454 struct trace_array *tr = filp->private_data;
6457 unsigned long size = 0, expanded_size = 0;
6459 mutex_lock(&trace_types_lock);
6460 for_each_tracing_cpu(cpu) {
6461 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6462 if (!ring_buffer_expanded)
6463 expanded_size += trace_buf_size >> 10;
6465 if (ring_buffer_expanded)
6466 r = sprintf(buf, "%lu\n", size);
6468 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6469 mutex_unlock(&trace_types_lock);
6471 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6475 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6476 size_t cnt, loff_t *ppos)
6479 * There is no need to read what the user has written, this function
6480 * is just to make sure that there is no error when "echo" is used
6489 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6491 struct trace_array *tr = inode->i_private;
6493 /* disable tracing ? */
6494 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6495 tracer_tracing_off(tr);
6496 /* resize the ring buffer to 0 */
6497 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6499 trace_array_put(tr);
6505 tracing_mark_write(struct file *filp, const char __user *ubuf,
6506 size_t cnt, loff_t *fpos)
6508 struct trace_array *tr = filp->private_data;
6509 struct ring_buffer_event *event;
6510 enum event_trigger_type tt = ETT_NONE;
6511 struct trace_buffer *buffer;
6512 struct print_entry *entry;
6513 unsigned long irq_flags;
6518 /* Used in tracing_mark_raw_write() as well */
6519 #define FAULTED_STR "<faulted>"
6520 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6522 if (tracing_disabled)
6525 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6528 if (cnt > TRACE_BUF_SIZE)
6529 cnt = TRACE_BUF_SIZE;
6531 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6533 local_save_flags(irq_flags);
6534 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6536 /* If less than "<faulted>", then make sure we can still add that */
6537 if (cnt < FAULTED_SIZE)
6538 size += FAULTED_SIZE - cnt;
6540 buffer = tr->array_buffer.buffer;
6541 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6542 irq_flags, preempt_count());
6543 if (unlikely(!event))
6544 /* Ring buffer disabled, return as if not open for write */
6547 entry = ring_buffer_event_data(event);
6548 entry->ip = _THIS_IP_;
6550 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6552 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6559 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6560 /* do not add \n before testing triggers, but add \0 */
6561 entry->buf[cnt] = '\0';
6562 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6565 if (entry->buf[cnt - 1] != '\n') {
6566 entry->buf[cnt] = '\n';
6567 entry->buf[cnt + 1] = '\0';
6569 entry->buf[cnt] = '\0';
6571 __buffer_unlock_commit(buffer, event);
6574 event_triggers_post_call(tr->trace_marker_file, tt);
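/*
 * Illustrative user-space sketch (assumes tracefs is mounted at
 * /sys/kernel/tracing; error handling omitted):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from user space\n", 22);
 *	close(fd);
 *
 * Each write() becomes one TRACE_PRINT entry in the ring buffer via the
 * function above, with a trailing newline/NUL added if needed.
 */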
6582 /* Limit it for now to 3K (including tag) */
6583 #define RAW_DATA_MAX_SIZE (1024*3)
6586 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6587 size_t cnt, loff_t *fpos)
6589 struct trace_array *tr = filp->private_data;
6590 struct ring_buffer_event *event;
6591 struct trace_buffer *buffer;
6592 struct raw_data_entry *entry;
6593 unsigned long irq_flags;
6598 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6600 if (tracing_disabled)
6603 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6606 /* The marker must at least have a tag id */
6607 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6610 if (cnt > TRACE_BUF_SIZE)
6611 cnt = TRACE_BUF_SIZE;
6613 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6615 local_save_flags(irq_flags);
6616 size = sizeof(*entry) + cnt;
6617 if (cnt < FAULT_SIZE_ID)
6618 size += FAULT_SIZE_ID - cnt;
6620 buffer = tr->array_buffer.buffer;
6621 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6622 irq_flags, preempt_count());
6624 /* Ring buffer disabled, return as if not open for write */
6627 entry = ring_buffer_event_data(event);
6629 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6632 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6637 __buffer_unlock_commit(buffer, event);
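/*
 * Illustrative raw marker write (sketch): the payload must start with an
 * unsigned int tag id, the rest is opaque binary data, e.g.
 *
 *	struct { unsigned int id; char data[8]; } rec = { 0x1234, "payload" };
 *	write(fd, &rec, sizeof(rec));
 *
 * where fd refers to trace_marker_raw.  The total size is capped at
 * RAW_DATA_MAX_SIZE (3K) above.
 */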
6645 static int tracing_clock_show(struct seq_file *m, void *v)
6647 struct trace_array *tr = m->private;
6650 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6652 "%s%s%s%s", i ? " " : "",
6653 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6654 i == tr->clock_id ? "]" : "");
6660 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6664 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6665 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6668 if (i == ARRAY_SIZE(trace_clocks))
6671 mutex_lock(&trace_types_lock);
6675 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6678 * New clock may not be consistent with the previous clock.
6679 * Reset the buffer so that it doesn't have incomparable timestamps.
6681 tracing_reset_online_cpus(&tr->array_buffer);
6683 #ifdef CONFIG_TRACER_MAX_TRACE
6684 if (tr->max_buffer.buffer)
6685 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6686 tracing_reset_online_cpus(&tr->max_buffer);
6689 mutex_unlock(&trace_types_lock);
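/*
 * Illustrative usage (sketch):
 *
 *	echo global > trace_clock
 *
 * selects the "global" clock for this instance and, as noted above,
 * resets the online-CPU buffers (and the max buffer, if allocated) so
 * that old and new timestamps are never mixed.
 */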
6694 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6695 size_t cnt, loff_t *fpos)
6697 struct seq_file *m = filp->private_data;
6698 struct trace_array *tr = m->private;
6700 const char *clockstr;
6703 if (cnt >= sizeof(buf))
6706 if (copy_from_user(buf, ubuf, cnt))
6711 clockstr = strstrip(buf);
6713 ret = tracing_set_clock(tr, clockstr);
6722 static int tracing_clock_open(struct inode *inode, struct file *file)
6724 struct trace_array *tr = inode->i_private;
6727 ret = tracing_check_open_get_tr(tr);
6731 ret = single_open(file, tracing_clock_show, inode->i_private);
6733 trace_array_put(tr);
6738 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6740 struct trace_array *tr = m->private;
6742 mutex_lock(&trace_types_lock);
6744 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6745 seq_puts(m, "delta [absolute]\n");
6747 seq_puts(m, "[delta] absolute\n");
6749 mutex_unlock(&trace_types_lock);
6754 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6756 struct trace_array *tr = inode->i_private;
6759 ret = tracing_check_open_get_tr(tr);
6763 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6765 trace_array_put(tr);
6770 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6774 mutex_lock(&trace_types_lock);
6776 if (abs && tr->time_stamp_abs_ref++)
6780 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6785 if (--tr->time_stamp_abs_ref)
6789 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6791 #ifdef CONFIG_TRACER_MAX_TRACE
6792 if (tr->max_buffer.buffer)
6793 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6796 mutex_unlock(&trace_types_lock);
6801 struct ftrace_buffer_info {
6802 struct trace_iterator iter;
6804 unsigned int spare_cpu;
6808 #ifdef CONFIG_TRACER_SNAPSHOT
6809 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6811 struct trace_array *tr = inode->i_private;
6812 struct trace_iterator *iter;
6816 ret = tracing_check_open_get_tr(tr);
6820 if (file->f_mode & FMODE_READ) {
6821 iter = __tracing_open(inode, file, true);
6823 ret = PTR_ERR(iter);
6825 /* Writes still need the seq_file to hold the private data */
6827 m = kzalloc(sizeof(*m), GFP_KERNEL);
6830 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6838 iter->array_buffer = &tr->max_buffer;
6839 iter->cpu_file = tracing_get_cpu(inode);
6841 file->private_data = m;
6845 trace_array_put(tr);
6851 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6854 struct seq_file *m = filp->private_data;
6855 struct trace_iterator *iter = m->private;
6856 struct trace_array *tr = iter->tr;
6860 ret = tracing_update_buffers();
6864 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6868 mutex_lock(&trace_types_lock);
6870 if (tr->current_trace->use_max_tr) {
6875 arch_spin_lock(&tr->max_lock);
6876 if (tr->cond_snapshot)
6878 arch_spin_unlock(&tr->max_lock);
6884 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6888 if (tr->allocated_snapshot)
6892 /* Only allow per-cpu swap if the ring buffer supports it */
6893 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6894 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6899 if (tr->allocated_snapshot)
6900 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6901 &tr->array_buffer, iter->cpu_file);
6903 ret = tracing_alloc_snapshot_instance(tr);
6906 local_irq_disable();
6907 /* Now, we're going to swap */
6908 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6909 update_max_tr(tr, current, smp_processor_id(), NULL);
6911 update_max_tr_single(tr, current, iter->cpu_file);
6915 if (tr->allocated_snapshot) {
6916 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6917 tracing_reset_online_cpus(&tr->max_buffer);
6919 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
6929 mutex_unlock(&trace_types_lock);
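/*
 * Illustrative snapshot usage (sketch, matching the val handling above):
 *
 *	echo 1 > snapshot    - allocate if needed and take a snapshot
 *	echo 0 > snapshot    - free the snapshot buffer
 *	echo 2 > snapshot    - clear the snapshot contents, keep the buffer
 *
 * Reading the snapshot file then shows the frozen copy of the trace.
 */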
6933 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6935 struct seq_file *m = file->private_data;
6938 ret = tracing_release(inode, file);
6940 if (file->f_mode & FMODE_READ)
6943 /* If write only, the seq_file is just a stub */
6951 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6952 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6953 size_t count, loff_t *ppos);
6954 static int tracing_buffers_release(struct inode *inode, struct file *file);
6955 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6956 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6958 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6960 struct ftrace_buffer_info *info;
6963 /* The following checks for tracefs lockdown */
6964 ret = tracing_buffers_open(inode, filp);
6968 info = filp->private_data;
6970 if (info->iter.trace->use_max_tr) {
6971 tracing_buffers_release(inode, filp);
6975 info->iter.snapshot = true;
6976 info->iter.array_buffer = &info->iter.tr->max_buffer;
6981 #endif /* CONFIG_TRACER_SNAPSHOT */
6984 static const struct file_operations tracing_thresh_fops = {
6985 .open = tracing_open_generic,
6986 .read = tracing_thresh_read,
6987 .write = tracing_thresh_write,
6988 .llseek = generic_file_llseek,
6991 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6992 static const struct file_operations tracing_max_lat_fops = {
6993 .open = tracing_open_generic,
6994 .read = tracing_max_lat_read,
6995 .write = tracing_max_lat_write,
6996 .llseek = generic_file_llseek,
7000 static const struct file_operations set_tracer_fops = {
7001 .open = tracing_open_generic,
7002 .read = tracing_set_trace_read,
7003 .write = tracing_set_trace_write,
7004 .llseek = generic_file_llseek,
7007 static const struct file_operations tracing_pipe_fops = {
7008 .open = tracing_open_pipe,
7009 .poll = tracing_poll_pipe,
7010 .read = tracing_read_pipe,
7011 .splice_read = tracing_splice_read_pipe,
7012 .release = tracing_release_pipe,
7013 .llseek = no_llseek,
7016 static const struct file_operations tracing_entries_fops = {
7017 .open = tracing_open_generic_tr,
7018 .read = tracing_entries_read,
7019 .write = tracing_entries_write,
7020 .llseek = generic_file_llseek,
7021 .release = tracing_release_generic_tr,
7024 static const struct file_operations tracing_total_entries_fops = {
7025 .open = tracing_open_generic_tr,
7026 .read = tracing_total_entries_read,
7027 .llseek = generic_file_llseek,
7028 .release = tracing_release_generic_tr,
7031 static const struct file_operations tracing_free_buffer_fops = {
7032 .open = tracing_open_generic_tr,
7033 .write = tracing_free_buffer_write,
7034 .release = tracing_free_buffer_release,
7037 static const struct file_operations tracing_mark_fops = {
7038 .open = tracing_open_generic_tr,
7039 .write = tracing_mark_write,
7040 .llseek = generic_file_llseek,
7041 .release = tracing_release_generic_tr,
7044 static const struct file_operations tracing_mark_raw_fops = {
7045 .open = tracing_open_generic_tr,
7046 .write = tracing_mark_raw_write,
7047 .llseek = generic_file_llseek,
7048 .release = tracing_release_generic_tr,
7051 static const struct file_operations trace_clock_fops = {
7052 .open = tracing_clock_open,
7054 .llseek = seq_lseek,
7055 .release = tracing_single_release_tr,
7056 .write = tracing_clock_write,
7059 static const struct file_operations trace_time_stamp_mode_fops = {
7060 .open = tracing_time_stamp_mode_open,
7062 .llseek = seq_lseek,
7063 .release = tracing_single_release_tr,
7066 #ifdef CONFIG_TRACER_SNAPSHOT
7067 static const struct file_operations snapshot_fops = {
7068 .open = tracing_snapshot_open,
7070 .write = tracing_snapshot_write,
7071 .llseek = tracing_lseek,
7072 .release = tracing_snapshot_release,
7075 static const struct file_operations snapshot_raw_fops = {
7076 .open = snapshot_raw_open,
7077 .read = tracing_buffers_read,
7078 .release = tracing_buffers_release,
7079 .splice_read = tracing_buffers_splice_read,
7080 .llseek = no_llseek,
7083 #endif /* CONFIG_TRACER_SNAPSHOT */
7085 #define TRACING_LOG_ERRS_MAX 8
7086 #define TRACING_LOG_LOC_MAX 128
7088 #define CMD_PREFIX " Command: "
7091 const char **errs; /* ptr to loc-specific array of err strings */
7092 u8 type; /* index into errs -> specific err string */
7093 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7097 struct tracing_log_err {
7098 struct list_head list;
7099 struct err_info info;
7100 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7101 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7104 static DEFINE_MUTEX(tracing_err_log_lock);
7106 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7108 struct tracing_log_err *err;
7110 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7111 err = kzalloc(sizeof(*err), GFP_KERNEL);
7113 err = ERR_PTR(-ENOMEM);
7114 tr->n_err_log_entries++;
7119 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7120 list_del(&err->list);
7126 * err_pos - find the position of a string within a command for error careting
7127 * @cmd: The tracing command that caused the error
7128 * @str: The string to position the caret at within @cmd
7130 * Finds the position of the first occurrence of @str within @cmd. The
7131 * return value can be passed to tracing_log_err() for caret placement
7134 * Returns the index within @cmd of the first occurrence of @str or 0
7135 * if @str was not found.
7137 unsigned int err_pos(char *cmd, const char *str)
7141 if (WARN_ON(!strlen(cmd)))
7144 found = strstr(cmd, str);
7152 * tracing_log_err - write an error to the tracing error log
7153 * @tr: The associated trace array for the error (NULL for top level array)
7154 * @loc: A string describing where the error occurred
7155 * @cmd: The tracing command that caused the error
7156 * @errs: The array of loc-specific static error strings
7157 * @type: The index into errs[], which produces the specific static err string
7158 * @pos: The position the caret should be placed in the cmd
7160 * Writes an error into tracing/error_log of the form:
7162 * <loc>: error: <text>
7166 * tracing/error_log is a small log file containing the last
7167 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7168 * unless there has been a tracing error, and the error log can be
7169 * cleared and have its memory freed by writing the empty string in
7170 * truncation mode to it i.e. echo > tracing/error_log.
7172 * NOTE: the @errs array along with the @type param are used to
7173 * produce a static error string - this string is not copied and saved
7174 * when the error is logged - only a pointer to it is saved. See
7175 * existing callers for examples of how static strings are typically
7176 * defined for use with tracing_log_err().
7178 void tracing_log_err(struct trace_array *tr,
7179 const char *loc, const char *cmd,
7180 const char **errs, u8 type, u8 pos)
7182 struct tracing_log_err *err;
7187 mutex_lock(&tracing_err_log_lock);
7188 err = get_tracing_log_err(tr);
7189 if (PTR_ERR(err) == -ENOMEM) {
7190 mutex_unlock(&tracing_err_log_lock);
7194 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7195 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd);
7197 err->info.errs = errs;
7198 err->info.type = type;
7199 err->info.pos = pos;
7200 err->info.ts = local_clock();
7202 list_add_tail(&err->list, &tr->err_log);
7203 mutex_unlock(&tracing_err_log_lock);
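/*
 * Illustrative caller sketch (the names below are made up for the
 * example; real users define similar static string arrays, e.g. in the
 * hist trigger code):
 *
 *	static const char *foo_errs[] = { "Duplicate key", "Missing field" };
 *
 *	tracing_log_err(tr, "trace_foo", cmd, foo_errs,
 *			FOO_ERR_DUP_KEY, err_pos(cmd, "keys="));
 *
 * Only the pointer to the static string array is stored, as the NOTE in
 * the kernel-doc above explains.
 */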
7206 static void clear_tracing_err_log(struct trace_array *tr)
7208 struct tracing_log_err *err, *next;
7210 mutex_lock(&tracing_err_log_lock);
7211 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7212 list_del(&err->list);
7216 tr->n_err_log_entries = 0;
7217 mutex_unlock(&tracing_err_log_lock);
7220 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7222 struct trace_array *tr = m->private;
7224 mutex_lock(&tracing_err_log_lock);
7226 return seq_list_start(&tr->err_log, *pos);
7229 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7231 struct trace_array *tr = m->private;
7233 return seq_list_next(v, &tr->err_log, pos);
7236 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7238 mutex_unlock(&tracing_err_log_lock);
7241 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7245 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7247 for (i = 0; i < pos; i++)
7252 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7254 struct tracing_log_err *err = v;
7257 const char *err_text = err->info.errs[err->info.type];
7258 u64 sec = err->info.ts;
7261 nsec = do_div(sec, NSEC_PER_SEC);
7262 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7263 err->loc, err_text);
7264 seq_printf(m, "%s", err->cmd);
7265 tracing_err_log_show_pos(m, err->info.pos);
7271 static const struct seq_operations tracing_err_log_seq_ops = {
7272 .start = tracing_err_log_seq_start,
7273 .next = tracing_err_log_seq_next,
7274 .stop = tracing_err_log_seq_stop,
7275 .show = tracing_err_log_seq_show
7278 static int tracing_err_log_open(struct inode *inode, struct file *file)
7280 struct trace_array *tr = inode->i_private;
7283 ret = tracing_check_open_get_tr(tr);
7287 /* If this file was opened for write, then erase contents */
7288 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7289 clear_tracing_err_log(tr);
7291 if (file->f_mode & FMODE_READ) {
7292 ret = seq_open(file, &tracing_err_log_seq_ops);
7294 struct seq_file *m = file->private_data;
7297 trace_array_put(tr);
7303 static ssize_t tracing_err_log_write(struct file *file,
7304 const char __user *buffer,
7305 size_t count, loff_t *ppos)
7310 static int tracing_err_log_release(struct inode *inode, struct file *file)
7312 struct trace_array *tr = inode->i_private;
7314 trace_array_put(tr);
7316 if (file->f_mode & FMODE_READ)
7317 seq_release(inode, file);
7322 static const struct file_operations tracing_err_log_fops = {
7323 .open = tracing_err_log_open,
7324 .write = tracing_err_log_write,
7326 .llseek = seq_lseek,
7327 .release = tracing_err_log_release,
7330 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7332 struct trace_array *tr = inode->i_private;
7333 struct ftrace_buffer_info *info;
7336 ret = tracing_check_open_get_tr(tr);
7340 info = kzalloc(sizeof(*info), GFP_KERNEL);
7342 trace_array_put(tr);
7346 mutex_lock(&trace_types_lock);
7349 info->iter.cpu_file = tracing_get_cpu(inode);
7350 info->iter.trace = tr->current_trace;
7351 info->iter.array_buffer = &tr->array_buffer;
7353 /* Force reading ring buffer for first read */
7354 info->read = (unsigned int)-1;
7356 filp->private_data = info;
7358 tr->current_trace->ref++;
7360 mutex_unlock(&trace_types_lock);
7362 ret = nonseekable_open(inode, filp);
7364 trace_array_put(tr);
7370 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7372 struct ftrace_buffer_info *info = filp->private_data;
7373 struct trace_iterator *iter = &info->iter;
7375 return trace_poll(iter, filp, poll_table);
7379 tracing_buffers_read(struct file *filp, char __user *ubuf,
7380 size_t count, loff_t *ppos)
7382 struct ftrace_buffer_info *info = filp->private_data;
7383 struct trace_iterator *iter = &info->iter;
7390 #ifdef CONFIG_TRACER_MAX_TRACE
7391 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7396 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7398 if (IS_ERR(info->spare)) {
7399 ret = PTR_ERR(info->spare);
7402 info->spare_cpu = iter->cpu_file;
7408 /* Do we have previous read data to read? */
7409 if (info->read < PAGE_SIZE)
7413 trace_access_lock(iter->cpu_file);
7414 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7418 trace_access_unlock(iter->cpu_file);
7421 if (trace_empty(iter)) {
7422 if ((filp->f_flags & O_NONBLOCK))
7425 ret = wait_on_pipe(iter, 0);
7436 size = PAGE_SIZE - info->read;
7440 ret = copy_to_user(ubuf, info->spare + info->read, size);
7452 static int tracing_buffers_release(struct inode *inode, struct file *file)
7454 struct ftrace_buffer_info *info = file->private_data;
7455 struct trace_iterator *iter = &info->iter;
7457 mutex_lock(&trace_types_lock);
7459 iter->tr->current_trace->ref--;
7461 __trace_array_put(iter->tr);
7464 ring_buffer_free_read_page(iter->array_buffer->buffer,
7465 info->spare_cpu, info->spare);
7468 mutex_unlock(&trace_types_lock);
7474 struct trace_buffer *buffer;
7477 refcount_t refcount;
7480 static void buffer_ref_release(struct buffer_ref *ref)
7482 if (!refcount_dec_and_test(&ref->refcount))
7484 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7488 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7489 struct pipe_buffer *buf)
7491 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7493 buffer_ref_release(ref);
7497 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7498 struct pipe_buffer *buf)
7500 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7502 if (refcount_read(&ref->refcount) > INT_MAX/2)
7505 refcount_inc(&ref->refcount);
7509 /* Pipe buffer operations for a buffer. */
7510 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7511 .confirm = generic_pipe_buf_confirm,
7512 .release = buffer_pipe_buf_release,
7513 .steal = generic_pipe_buf_nosteal,
7514 .get = buffer_pipe_buf_get,
7518 * Callback from splice_to_pipe(), if we need to release some pages
7519 * at the end of the spd in case we errored out while filling the pipe.
7521 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7523 struct buffer_ref *ref =
7524 (struct buffer_ref *)spd->partial[i].private;
7526 buffer_ref_release(ref);
7527 spd->partial[i].private = 0;
7531 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7532 struct pipe_inode_info *pipe, size_t len,
7535 struct ftrace_buffer_info *info = file->private_data;
7536 struct trace_iterator *iter = &info->iter;
7537 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7538 struct page *pages_def[PIPE_DEF_BUFFERS];
7539 struct splice_pipe_desc spd = {
7541 .partial = partial_def,
7542 .nr_pages_max = PIPE_DEF_BUFFERS,
7543 .ops = &buffer_pipe_buf_ops,
7544 .spd_release = buffer_spd_release,
7546 struct buffer_ref *ref;
7550 #ifdef CONFIG_TRACER_MAX_TRACE
7551 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7555 if (*ppos & (PAGE_SIZE - 1))
7558 if (len & (PAGE_SIZE - 1)) {
7559 if (len < PAGE_SIZE)
7564 if (splice_grow_spd(pipe, &spd))
7568 trace_access_lock(iter->cpu_file);
7569 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7571 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7575 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7581 refcount_set(&ref->refcount, 1);
7582 ref->buffer = iter->array_buffer->buffer;
7583 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7584 if (IS_ERR(ref->page)) {
7585 ret = PTR_ERR(ref->page);
7590 ref->cpu = iter->cpu_file;
7592 r = ring_buffer_read_page(ref->buffer, &ref->page,
7593 len, iter->cpu_file, 1);
7595 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7601 page = virt_to_page(ref->page);
7603 spd.pages[i] = page;
7604 spd.partial[i].len = PAGE_SIZE;
7605 spd.partial[i].offset = 0;
7606 spd.partial[i].private = (unsigned long)ref;
7610 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7613 trace_access_unlock(iter->cpu_file);
7616 /* did we read anything? */
7617 if (!spd.nr_pages) {
7622 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7625 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7632 ret = splice_to_pipe(pipe, &spd);
7634 splice_shrink_spd(&spd);
7639 static const struct file_operations tracing_buffers_fops = {
7640 .open = tracing_buffers_open,
7641 .read = tracing_buffers_read,
7642 .poll = tracing_buffers_poll,
7643 .release = tracing_buffers_release,
7644 .splice_read = tracing_buffers_splice_read,
7645 .llseek = no_llseek,
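/*
 * Illustrative sketch, not part of the original file: a user-space
 * reader would typically splice per_cpu/cpuN/trace_pipe_raw through a
 * pipe so that whole ring-buffer pages are handed over without copying.
 * The file names and the lack of error handling below are hypothetical:
 *
 *   int in = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *   int out = open("cpu0.raw", O_WRONLY | O_CREAT, 0644);
 *   int pfd[2];
 *
 *   pipe(pfd);
 *   splice(in, NULL, pfd[1], NULL, 4096, 0);
 *   splice(pfd[0], NULL, out, NULL, 4096, 0);
 *
 * Each page handed to the pipe is wrapped in a buffer_ref, so the
 * ring-buffer page is only recycled once the last pipe reference to it
 * is dropped (see buffer_ref_release() above).
 */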
7649 tracing_stats_read(struct file *filp, char __user *ubuf,
7650 size_t count, loff_t *ppos)
7652 struct inode *inode = file_inode(filp);
7653 struct trace_array *tr = inode->i_private;
7654 struct array_buffer *trace_buf = &tr->array_buffer;
7655 int cpu = tracing_get_cpu(inode);
7656 struct trace_seq *s;
7658 unsigned long long t;
7659 unsigned long usec_rem;
7661 s = kmalloc(sizeof(*s), GFP_KERNEL);
7667 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7668 trace_seq_printf(s, "entries: %ld\n", cnt);
7670 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7671 trace_seq_printf(s, "overrun: %ld\n", cnt);
7673 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7674 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7676 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7677 trace_seq_printf(s, "bytes: %ld\n", cnt);
7679 if (trace_clocks[tr->clock_id].in_ns) {
7680 /* local or global for trace_clock */
7681 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7682 usec_rem = do_div(t, USEC_PER_SEC);
7683 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7686 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7687 usec_rem = do_div(t, USEC_PER_SEC);
7688 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7690 /* counter or tsc mode for trace_clock */
7691 trace_seq_printf(s, "oldest event ts: %llu\n",
7692 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7694 trace_seq_printf(s, "now ts: %llu\n",
7695 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7698 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7699 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7701 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7702 trace_seq_printf(s, "read events: %ld\n", cnt);
7704 count = simple_read_from_buffer(ubuf, count, ppos,
7705 s->buffer, trace_seq_used(s));
7712 static const struct file_operations tracing_stats_fops = {
7713 .open = tracing_open_generic_tr,
7714 .read = tracing_stats_read,
7715 .llseek = generic_file_llseek,
7716 .release = tracing_release_generic_tr,
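/*
 * Illustrative sketch (the numbers are made up): with a clock that
 * counts in nanoseconds, reading per_cpu/cpuN/stats produces output
 * along the lines of
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 37512
 *   oldest event ts:  5231.129376
 *   now ts:  5241.870056
 *   dropped events: 0
 *   read events: 213
 *
 * With a counter or TSC clock the two timestamp lines print the raw
 * 64-bit values instead.
 */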
7719 #ifdef CONFIG_DYNAMIC_FTRACE
7722 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7723 size_t cnt, loff_t *ppos)
7729 /* 256 should be plenty to hold the amount needed */
7730 buf = kmalloc(256, GFP_KERNEL);
7734 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7735 ftrace_update_tot_cnt,
7736 ftrace_number_of_pages,
7737 ftrace_number_of_groups);
7739 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7744 static const struct file_operations tracing_dyn_info_fops = {
7745 .open = tracing_open_generic,
7746 .read = tracing_read_dyn_info,
7747 .llseek = generic_file_llseek,
7749 #endif /* CONFIG_DYNAMIC_FTRACE */
7751 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7753 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7754 struct trace_array *tr, struct ftrace_probe_ops *ops,
7757 tracing_snapshot_instance(tr);
7761 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7762 struct trace_array *tr, struct ftrace_probe_ops *ops,
7765 struct ftrace_func_mapper *mapper = data;
7769 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7779 tracing_snapshot_instance(tr);
7783 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7784 struct ftrace_probe_ops *ops, void *data)
7786 struct ftrace_func_mapper *mapper = data;
7789 seq_printf(m, "%ps:", (void *)ip);
7791 seq_puts(m, "snapshot");
7794 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7797 seq_printf(m, ":count=%ld\n", *count);
7799 seq_puts(m, ":unlimited\n");
7805 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7806 unsigned long ip, void *init_data, void **data)
7808 struct ftrace_func_mapper *mapper = *data;
7811 mapper = allocate_ftrace_func_mapper();
7817 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7821 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7822 unsigned long ip, void *data)
7824 struct ftrace_func_mapper *mapper = data;
7829 free_ftrace_func_mapper(mapper, NULL);
7833 ftrace_func_mapper_remove_ip(mapper, ip);
7836 static struct ftrace_probe_ops snapshot_probe_ops = {
7837 .func = ftrace_snapshot,
7838 .print = ftrace_snapshot_print,
7841 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7842 .func = ftrace_count_snapshot,
7843 .print = ftrace_snapshot_print,
7844 .init = ftrace_snapshot_init,
7845 .free = ftrace_snapshot_free,
7849 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7850 char *glob, char *cmd, char *param, int enable)
7852 struct ftrace_probe_ops *ops;
7853 void *count = (void *)-1;
7860 /* hash funcs only work with set_ftrace_filter */
7864 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7867 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7872 number = strsep(¶m, ":");
7874 if (!strlen(number))
7878 * We use the callback data field (which is a pointer) as our counter.
7881 ret = kstrtoul(number, 0, (unsigned long *)&count);
7886 ret = tracing_alloc_snapshot_instance(tr);
7890 ret = register_ftrace_function_probe(glob, tr, ops, count);
7893 return ret < 0 ? ret : 0;
7896 static struct ftrace_func_command ftrace_snapshot_cmd = {
7898 .func = ftrace_trace_snapshot_callback,
7901 static __init int register_snapshot_cmd(void)
7903 return register_ftrace_command(&ftrace_snapshot_cmd);
7906 static inline __init int register_snapshot_cmd(void) { return 0; }
7907 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
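/*
 * Illustrative usage of the command registered above (paths relative to
 * the tracefs mount; the function name is only an example):
 *
 *   echo 'schedule:snapshot' > set_ftrace_filter      take a snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter    take at most 5 snapshots
 *   echo '!schedule:snapshot' > set_ftrace_filter     remove the probe again
 *
 * The optional count is parsed by ftrace_trace_snapshot_callback() and
 * stored in the probe's callback data.
 */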
7909 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7911 if (WARN_ON(!tr->dir))
7912 return ERR_PTR(-ENODEV);
7914 /* Top directory uses NULL as the parent */
7915 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7918 /* All sub buffers have a descriptor */
7922 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7924 struct dentry *d_tracer;
7927 return tr->percpu_dir;
7929 d_tracer = tracing_get_dentry(tr);
7930 if (IS_ERR(d_tracer))
7933 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7935 MEM_FAIL(!tr->percpu_dir,
7936 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7938 return tr->percpu_dir;
7941 static struct dentry *
7942 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7943 void *data, long cpu, const struct file_operations *fops)
7945 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7947 if (ret) /* See tracing_get_cpu() */
7948 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7953 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7955 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7956 struct dentry *d_cpu;
7957 char cpu_dir[30]; /* 30 characters should be more than enough */
7962 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7963 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7965 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7969 /* per cpu trace_pipe */
7970 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7971 tr, cpu, &tracing_pipe_fops);
7974 trace_create_cpu_file("trace", 0644, d_cpu,
7975 tr, cpu, &tracing_fops);
7977 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7978 tr, cpu, &tracing_buffers_fops);
7980 trace_create_cpu_file("stats", 0444, d_cpu,
7981 tr, cpu, &tracing_stats_fops);
7983 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7984 tr, cpu, &tracing_entries_fops);
7986 #ifdef CONFIG_TRACER_SNAPSHOT
7987 trace_create_cpu_file("snapshot", 0644, d_cpu,
7988 tr, cpu, &snapshot_fops);
7990 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7991 tr, cpu, &snapshot_raw_fops);
7995 #ifdef CONFIG_FTRACE_SELFTEST
7996 /* Let selftest have access to static functions in this file */
7997 #include "trace_selftest.c"
8001 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8004 struct trace_option_dentry *topt = filp->private_data;
8007 if (topt->flags->val & topt->opt->bit)
8012 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8016 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8019 struct trace_option_dentry *topt = filp->private_data;
8023 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8027 if (val != 0 && val != 1)
8030 if (!!(topt->flags->val & topt->opt->bit) != val) {
8031 mutex_lock(&trace_types_lock);
8032 ret = __set_tracer_option(topt->tr, topt->flags,
8034 mutex_unlock(&trace_types_lock);
8045 static const struct file_operations trace_options_fops = {
8046 .open = tracing_open_generic,
8047 .read = trace_options_read,
8048 .write = trace_options_write,
8049 .llseek = generic_file_llseek,
8053 * In order to pass in both the trace_array descriptor as well as the index
8054 * to the flag that the trace option file represents, the trace_array
8055 * has a character array of trace_flags_index[], which holds the index
8056 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8057 * The address of this character array is passed to the flag option file
8058 * read/write callbacks.
8060 * In order to extract both the index and the trace_array descriptor,
8061 * get_tr_index() uses the following algorithm.
8065 * As the pointer points at index[i] and index[i] == i, dereferencing it yields the index.
8068 * Then to get the trace_array descriptor, by subtracting that index
8069 * from the ptr, we get to the start of the index itself.
8071 * ptr - idx == &index[0]
8073 * Then a simple container_of() from that pointer gets us to the
8074 * trace_array descriptor.
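/*
 * Worked example (illustrative only): if the option file for flag bit 3
 * was created with data == &tr->trace_flags_index[3], then
 *
 *   *pindex == tr->trace_flags_index[3] == 3
 *   data - *pindex == &tr->trace_flags_index[0]
 *
 * and container_of() on that address recovers the enclosing
 * struct trace_array.
 */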
8076 static void get_tr_index(void *data, struct trace_array **ptr,
8077 unsigned int *pindex)
8079 *pindex = *(unsigned char *)data;
8081 *ptr = container_of(data - *pindex, struct trace_array,
8086 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8089 void *tr_index = filp->private_data;
8090 struct trace_array *tr;
8094 get_tr_index(tr_index, &tr, &index);
8096 if (tr->trace_flags & (1 << index))
8101 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8105 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8108 void *tr_index = filp->private_data;
8109 struct trace_array *tr;
8114 get_tr_index(tr_index, &tr, &index);
8116 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8120 if (val != 0 && val != 1)
8123 mutex_lock(&event_mutex);
8124 mutex_lock(&trace_types_lock);
8125 ret = set_tracer_flag(tr, 1 << index, val);
8126 mutex_unlock(&trace_types_lock);
8127 mutex_unlock(&event_mutex);
8137 static const struct file_operations trace_options_core_fops = {
8138 .open = tracing_open_generic,
8139 .read = trace_options_core_read,
8140 .write = trace_options_core_write,
8141 .llseek = generic_file_llseek,
8144 struct dentry *trace_create_file(const char *name,
8146 struct dentry *parent,
8148 const struct file_operations *fops)
8152 ret = tracefs_create_file(name, mode, parent, data, fops);
8154 pr_warn("Could not create tracefs '%s' entry\n", name);
8160 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8162 struct dentry *d_tracer;
8167 d_tracer = tracing_get_dentry(tr);
8168 if (IS_ERR(d_tracer))
8171 tr->options = tracefs_create_dir("options", d_tracer);
8173 pr_warn("Could not create tracefs directory 'options'\n");
8181 create_trace_option_file(struct trace_array *tr,
8182 struct trace_option_dentry *topt,
8183 struct tracer_flags *flags,
8184 struct tracer_opt *opt)
8186 struct dentry *t_options;
8188 t_options = trace_options_init_dentry(tr);
8192 topt->flags = flags;
8196 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8197 &trace_options_fops);
8202 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8204 struct trace_option_dentry *topts;
8205 struct trace_options *tr_topts;
8206 struct tracer_flags *flags;
8207 struct tracer_opt *opts;
8214 flags = tracer->flags;
8216 if (!flags || !flags->opts)
8220 * If this is an instance, only create flags for tracers
8221 * the instance may have.
8223 if (!trace_ok_for_array(tracer, tr))
8226 for (i = 0; i < tr->nr_topts; i++) {
8227 /* Make sure there are no duplicate flags. */
8228 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8234 for (cnt = 0; opts[cnt].name; cnt++)
8237 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8241 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8248 tr->topts = tr_topts;
8249 tr->topts[tr->nr_topts].tracer = tracer;
8250 tr->topts[tr->nr_topts].topts = topts;
8253 for (cnt = 0; opts[cnt].name; cnt++) {
8254 create_trace_option_file(tr, &topts[cnt], flags,
8256 MEM_FAIL(topts[cnt].entry == NULL,
8257 "Failed to create trace option: %s",
8262 static struct dentry *
8263 create_trace_option_core_file(struct trace_array *tr,
8264 const char *option, long index)
8266 struct dentry *t_options;
8268 t_options = trace_options_init_dentry(tr);
8272 return trace_create_file(option, 0644, t_options,
8273 (void *)&tr->trace_flags_index[index],
8274 &trace_options_core_fops);
8277 static void create_trace_options_dir(struct trace_array *tr)
8279 struct dentry *t_options;
8280 bool top_level = tr == &global_trace;
8283 t_options = trace_options_init_dentry(tr);
8287 for (i = 0; trace_options[i]; i++) {
8289 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8290 create_trace_option_core_file(tr, trace_options[i], i);
8295 rb_simple_read(struct file *filp, char __user *ubuf,
8296 size_t cnt, loff_t *ppos)
8298 struct trace_array *tr = filp->private_data;
8302 r = tracer_tracing_is_on(tr);
8303 r = sprintf(buf, "%d\n", r);
8305 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8309 rb_simple_write(struct file *filp, const char __user *ubuf,
8310 size_t cnt, loff_t *ppos)
8312 struct trace_array *tr = filp->private_data;
8313 struct trace_buffer *buffer = tr->array_buffer.buffer;
8317 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8322 mutex_lock(&trace_types_lock);
8323 if (!!val == tracer_tracing_is_on(tr)) {
8324 val = 0; /* do nothing */
8326 tracer_tracing_on(tr);
8327 if (tr->current_trace->start)
8328 tr->current_trace->start(tr);
8330 tracer_tracing_off(tr);
8331 if (tr->current_trace->stop)
8332 tr->current_trace->stop(tr);
8334 mutex_unlock(&trace_types_lock);
8342 static const struct file_operations rb_simple_fops = {
8343 .open = tracing_open_generic_tr,
8344 .read = rb_simple_read,
8345 .write = rb_simple_write,
8346 .release = tracing_release_generic_tr,
8347 .llseek = default_llseek,
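/*
 * Illustrative note (not from the original source): writing to the
 * tracing_on file backed by these operations pauses or resumes
 * recording without clearing the buffer, and also invokes the current
 * tracer's start()/stop() callbacks:
 *
 *   echo 0 > tracing_on    stop recording into the ring buffer
 *   echo 1 > tracing_on    resume recording
 */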
8351 buffer_percent_read(struct file *filp, char __user *ubuf,
8352 size_t cnt, loff_t *ppos)
8354 struct trace_array *tr = filp->private_data;
8358 r = tr->buffer_percent;
8359 r = sprintf(buf, "%d\n", r);
8361 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8365 buffer_percent_write(struct file *filp, const char __user *ubuf,
8366 size_t cnt, loff_t *ppos)
8368 struct trace_array *tr = filp->private_data;
8372 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8382 tr->buffer_percent = val;
8389 static const struct file_operations buffer_percent_fops = {
8390 .open = tracing_open_generic_tr,
8391 .read = buffer_percent_read,
8392 .write = buffer_percent_write,
8393 .release = tracing_release_generic_tr,
8394 .llseek = default_llseek,
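/*
 * Illustrative note: buffer_percent controls how full the ring buffer
 * must be before a blocked trace_pipe_raw reader is woken (see
 * wait_on_pipe() in tracing_buffers_splice_read() above):
 *
 *   echo 0   > buffer_percent    wake as soon as any data is present
 *   echo 50  > buffer_percent    wake when the buffer is half full
 *   echo 100 > buffer_percent    wake only when the buffer is full
 *
 * The default of 50 is set in init_tracer_tracefs() below.
 */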
8397 static struct dentry *trace_instance_dir;
8400 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8403 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8405 enum ring_buffer_flags rb_flags;
8407 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8411 buf->buffer = ring_buffer_alloc(size, rb_flags);
8415 buf->data = alloc_percpu(struct trace_array_cpu);
8417 ring_buffer_free(buf->buffer);
8422 /* Allocate the first page for all buffers */
8423 set_buffer_entries(&tr->array_buffer,
8424 ring_buffer_size(tr->array_buffer.buffer, 0));
8429 static int allocate_trace_buffers(struct trace_array *tr, int size)
8433 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8437 #ifdef CONFIG_TRACER_MAX_TRACE
8438 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8439 allocate_snapshot ? size : 1);
8440 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8441 ring_buffer_free(tr->array_buffer.buffer);
8442 tr->array_buffer.buffer = NULL;
8443 free_percpu(tr->array_buffer.data);
8444 tr->array_buffer.data = NULL;
8447 tr->allocated_snapshot = allocate_snapshot;
8450 * Only the top level trace array gets its snapshot allocated
8451 * from the kernel command line.
8453 allocate_snapshot = false;
8458 static void free_trace_buffer(struct array_buffer *buf)
8461 ring_buffer_free(buf->buffer);
8463 free_percpu(buf->data);
8468 static void free_trace_buffers(struct trace_array *tr)
8473 free_trace_buffer(&tr->array_buffer);
8475 #ifdef CONFIG_TRACER_MAX_TRACE
8476 free_trace_buffer(&tr->max_buffer);
8480 static void init_trace_flags_index(struct trace_array *tr)
8484 /* Used by the trace options files */
8485 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8486 tr->trace_flags_index[i] = i;
8489 static void __update_tracer_options(struct trace_array *tr)
8493 for (t = trace_types; t; t = t->next)
8494 add_tracer_options(tr, t);
8497 static void update_tracer_options(struct trace_array *tr)
8499 mutex_lock(&trace_types_lock);
8500 __update_tracer_options(tr);
8501 mutex_unlock(&trace_types_lock);
8504 /* Must have trace_types_lock held */
8505 struct trace_array *trace_array_find(const char *instance)
8507 struct trace_array *tr, *found = NULL;
8509 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8510 if (tr->name && strcmp(tr->name, instance) == 0) {
8519 struct trace_array *trace_array_find_get(const char *instance)
8521 struct trace_array *tr;
8523 mutex_lock(&trace_types_lock);
8524 tr = trace_array_find(instance);
8527 mutex_unlock(&trace_types_lock);
8532 static struct trace_array *trace_array_create(const char *name)
8534 struct trace_array *tr;
8538 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8540 return ERR_PTR(ret);
8542 tr->name = kstrdup(name, GFP_KERNEL);
8546 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8549 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8551 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8553 raw_spin_lock_init(&tr->start_lock);
8555 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8557 tr->current_trace = &nop_trace;
8559 INIT_LIST_HEAD(&tr->systems);
8560 INIT_LIST_HEAD(&tr->events);
8561 INIT_LIST_HEAD(&tr->hist_vars);
8562 INIT_LIST_HEAD(&tr->err_log);
8564 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8567 tr->dir = tracefs_create_dir(name, trace_instance_dir);
8571 ret = event_trace_add_tracer(tr->dir, tr);
8573 tracefs_remove(tr->dir);
8577 ftrace_init_trace_array(tr);
8579 init_tracer_tracefs(tr, tr->dir);
8580 init_trace_flags_index(tr);
8581 __update_tracer_options(tr);
8583 list_add(&tr->list, &ftrace_trace_arrays);
8591 free_trace_buffers(tr);
8592 free_cpumask_var(tr->tracing_cpumask);
8596 return ERR_PTR(ret);
8599 static int instance_mkdir(const char *name)
8601 struct trace_array *tr;
8604 mutex_lock(&event_mutex);
8605 mutex_lock(&trace_types_lock);
8608 if (trace_array_find(name))
8611 tr = trace_array_create(name);
8613 ret = PTR_ERR_OR_ZERO(tr);
8616 mutex_unlock(&trace_types_lock);
8617 mutex_unlock(&event_mutex);
8622 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8623 * @name: The name of the trace array to be looked up/created.
8625 * Returns a pointer to the trace array with the given name, or
8626 * NULL if it cannot be created.
8628 * NOTE: This function increments the reference counter associated with the
8629 * trace array returned. This makes sure it cannot be freed while in use.
8630 * Use trace_array_put() once the trace array is no longer needed.
8631 * If the trace_array is to be freed, trace_array_destroy() needs to
8632 * be called after the trace_array_put(), or simply let user space delete
8633 * it from the tracefs instances directory. But until the
8634 * trace_array_put() is called, user space cannot delete it.
8637 struct trace_array *trace_array_get_by_name(const char *name)
8639 struct trace_array *tr;
8641 mutex_lock(&event_mutex);
8642 mutex_lock(&trace_types_lock);
8644 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8645 if (tr->name && strcmp(tr->name, name) == 0)
8649 tr = trace_array_create(name);
8657 mutex_unlock(&trace_types_lock);
8658 mutex_unlock(&event_mutex);
8661 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
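/*
 * Minimal usage sketch for the API above (hypothetical module code, not
 * part of this file):
 *
 *   struct trace_array *tr;
 *
 *   tr = trace_array_get_by_name("my_instance");
 *   if (!tr)
 *           return -ENOMEM;
 *   trace_array_printk(tr, _THIS_IP_, "hello from my module\n");
 *   trace_array_put(tr);
 *
 * The instance stays visible in the tracefs "instances" directory until
 * trace_array_destroy() is called or user space removes it once the
 * reference has been dropped.
 */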
8663 static int __remove_instance(struct trace_array *tr)
8667 /* Reference counter for a newly created trace array = 1. */
8668 if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
8671 list_del(&tr->list);
8673 /* Disable all the flags that were enabled coming in */
8674 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8675 if ((1 << i) & ZEROED_TRACE_FLAGS)
8676 set_tracer_flag(tr, 1 << i, 0);
8679 tracing_set_nop(tr);
8680 clear_ftrace_function_probes(tr);
8681 event_trace_del_tracer(tr);
8682 ftrace_clear_pids(tr);
8683 ftrace_destroy_function_files(tr);
8684 tracefs_remove(tr->dir);
8685 free_trace_buffers(tr);
8687 for (i = 0; i < tr->nr_topts; i++) {
8688 kfree(tr->topts[i].topts);
8692 free_cpumask_var(tr->tracing_cpumask);
8700 int trace_array_destroy(struct trace_array *this_tr)
8702 struct trace_array *tr;
8708 mutex_lock(&event_mutex);
8709 mutex_lock(&trace_types_lock);
8713 /* Make sure the trace array exists before destroying it. */
8714 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8715 if (tr == this_tr) {
8716 ret = __remove_instance(tr);
8721 mutex_unlock(&trace_types_lock);
8722 mutex_unlock(&event_mutex);
8726 EXPORT_SYMBOL_GPL(trace_array_destroy);
8728 static int instance_rmdir(const char *name)
8730 struct trace_array *tr;
8733 mutex_lock(&event_mutex);
8734 mutex_lock(&trace_types_lock);
8737 tr = trace_array_find(name);
8739 ret = __remove_instance(tr);
8741 mutex_unlock(&trace_types_lock);
8742 mutex_unlock(&event_mutex);
8747 static __init void create_trace_instances(struct dentry *d_tracer)
8749 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8752 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8757 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8759 struct trace_event_file *file;
8762 trace_create_file("available_tracers", 0444, d_tracer,
8763 tr, &show_traces_fops);
8765 trace_create_file("current_tracer", 0644, d_tracer,
8766 tr, &set_tracer_fops);
8768 trace_create_file("tracing_cpumask", 0644, d_tracer,
8769 tr, &tracing_cpumask_fops);
8771 trace_create_file("trace_options", 0644, d_tracer,
8772 tr, &tracing_iter_fops);
8774 trace_create_file("trace", 0644, d_tracer,
8777 trace_create_file("trace_pipe", 0444, d_tracer,
8778 tr, &tracing_pipe_fops);
8780 trace_create_file("buffer_size_kb", 0644, d_tracer,
8781 tr, &tracing_entries_fops);
8783 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8784 tr, &tracing_total_entries_fops);
8786 trace_create_file("free_buffer", 0200, d_tracer,
8787 tr, &tracing_free_buffer_fops);
8789 trace_create_file("trace_marker", 0220, d_tracer,
8790 tr, &tracing_mark_fops);
8792 file = __find_event_file(tr, "ftrace", "print");
8793 if (file && file->dir)
8794 trace_create_file("trigger", 0644, file->dir, file,
8795 &event_trigger_fops);
8796 tr->trace_marker_file = file;
8798 trace_create_file("trace_marker_raw", 0220, d_tracer,
8799 tr, &tracing_mark_raw_fops);
8801 trace_create_file("trace_clock", 0644, d_tracer, tr,
8804 trace_create_file("tracing_on", 0644, d_tracer,
8805 tr, &rb_simple_fops);
8807 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8808 &trace_time_stamp_mode_fops);
8810 tr->buffer_percent = 50;
8812 trace_create_file("buffer_percent", 0444, d_tracer,
8813 tr, &buffer_percent_fops);
8815 create_trace_options_dir(tr);
8817 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8818 trace_create_maxlat_file(tr, d_tracer);
8821 if (ftrace_create_function_files(tr, d_tracer))
8822 MEM_FAIL(1, "Could not allocate function filter files");
8824 #ifdef CONFIG_TRACER_SNAPSHOT
8825 trace_create_file("snapshot", 0644, d_tracer,
8826 tr, &snapshot_fops);
8829 trace_create_file("error_log", 0644, d_tracer,
8830 tr, &tracing_err_log_fops);
8832 for_each_tracing_cpu(cpu)
8833 tracing_init_tracefs_percpu(tr, cpu);
8835 ftrace_init_tracefs(tr, d_tracer);
8838 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8840 struct vfsmount *mnt;
8841 struct file_system_type *type;
8844 * To maintain backward compatibility for tools that mount
8845 * debugfs to get to the tracing facility, tracefs is automatically
8846 * mounted to the debugfs/tracing directory.
8848 type = get_fs_type("tracefs");
8851 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8852 put_filesystem(type);
8861 * tracing_init_dentry - initialize top level trace array
8863 * This is called when creating files or directories in the tracing
8864 * directory. It is called via fs_initcall() by any of the boot up code
8865 * and expects to return the dentry of the top level tracing directory.
8867 struct dentry *tracing_init_dentry(void)
8869 struct trace_array *tr = &global_trace;
8871 if (security_locked_down(LOCKDOWN_TRACEFS)) {
8872 pr_warn("Tracing disabled due to lockdown\n");
8873 return ERR_PTR(-EPERM);
8876 /* The top level trace array uses NULL as parent */
8880 if (WARN_ON(!tracefs_initialized()) ||
8881 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8882 WARN_ON(!debugfs_initialized())))
8883 return ERR_PTR(-ENODEV);
8886 * As there may still be users that expect the tracing
8887 * files to exist in debugfs/tracing, we must automount
8888 * the tracefs file system there, so older tools still
8889 * work with the newer kernel.
8891 tr->dir = debugfs_create_automount("tracing", NULL,
8892 trace_automount, NULL);
8897 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8898 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8900 static void __init trace_eval_init(void)
8904 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8905 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8908 #ifdef CONFIG_MODULES
8909 static void trace_module_add_evals(struct module *mod)
8911 if (!mod->num_trace_evals)
8915 * Modules with bad taint do not have events created; do
8916 * not bother with their eval maps either.
8918 if (trace_module_has_bad_taint(mod))
8921 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8924 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8925 static void trace_module_remove_evals(struct module *mod)
8927 union trace_eval_map_item *map;
8928 union trace_eval_map_item **last = &trace_eval_maps;
8930 if (!mod->num_trace_evals)
8933 mutex_lock(&trace_eval_mutex);
8935 map = trace_eval_maps;
8938 if (map->head.mod == mod)
8940 map = trace_eval_jmp_to_tail(map);
8941 last = &map->tail.next;
8942 map = map->tail.next;
8947 *last = trace_eval_jmp_to_tail(map)->tail.next;
8950 mutex_unlock(&trace_eval_mutex);
8953 static inline void trace_module_remove_evals(struct module *mod) { }
8954 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8956 static int trace_module_notify(struct notifier_block *self,
8957 unsigned long val, void *data)
8959 struct module *mod = data;
8962 case MODULE_STATE_COMING:
8963 trace_module_add_evals(mod);
8965 case MODULE_STATE_GOING:
8966 trace_module_remove_evals(mod);
8973 static struct notifier_block trace_module_nb = {
8974 .notifier_call = trace_module_notify,
8977 #endif /* CONFIG_MODULES */
8979 static __init int tracer_init_tracefs(void)
8981 struct dentry *d_tracer;
8983 trace_access_lock_init();
8985 d_tracer = tracing_init_dentry();
8986 if (IS_ERR(d_tracer))
8991 init_tracer_tracefs(&global_trace, d_tracer);
8992 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8994 trace_create_file("tracing_thresh", 0644, d_tracer,
8995 &global_trace, &tracing_thresh_fops);
8997 trace_create_file("README", 0444, d_tracer,
8998 NULL, &tracing_readme_fops);
9000 trace_create_file("saved_cmdlines", 0444, d_tracer,
9001 NULL, &tracing_saved_cmdlines_fops);
9003 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
9004 NULL, &tracing_saved_cmdlines_size_fops);
9006 trace_create_file("saved_tgids", 0444, d_tracer,
9007 NULL, &tracing_saved_tgids_fops);
9011 trace_create_eval_file(d_tracer);
9013 #ifdef CONFIG_MODULES
9014 register_module_notifier(&trace_module_nb);
9017 #ifdef CONFIG_DYNAMIC_FTRACE
9018 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
9019 NULL, &tracing_dyn_info_fops);
9022 create_trace_instances(d_tracer);
9024 update_tracer_options(&global_trace);
9029 static int trace_panic_handler(struct notifier_block *this,
9030 unsigned long event, void *unused)
9032 if (ftrace_dump_on_oops)
9033 ftrace_dump(ftrace_dump_on_oops);
9037 static struct notifier_block trace_panic_notifier = {
9038 .notifier_call = trace_panic_handler,
9040 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9043 static int trace_die_handler(struct notifier_block *self,
9049 if (ftrace_dump_on_oops)
9050 ftrace_dump(ftrace_dump_on_oops);
9058 static struct notifier_block trace_die_notifier = {
9059 .notifier_call = trace_die_handler,
9064 * printk is set to a max of 1024; we really don't need it that big.
9065 * Nothing should be printing 1000 characters anyway.
9067 #define TRACE_MAX_PRINT 1000
9070 * Define here KERN_TRACE so that we have one place to modify
9071 * it if we decide to change what log level the ftrace dump should be at.
9074 #define KERN_TRACE KERN_EMERG
9077 trace_printk_seq(struct trace_seq *s)
9079 /* Probably should print a warning here. */
9080 if (s->seq.len >= TRACE_MAX_PRINT)
9081 s->seq.len = TRACE_MAX_PRINT;
9084 * More paranoid code. Although the buffer size is set to
9085 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9086 * an extra layer of protection.
9088 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9089 s->seq.len = s->seq.size - 1;
9091 /* should be NUL terminated, but we are paranoid. */
9092 s->buffer[s->seq.len] = 0;
9094 printk(KERN_TRACE "%s", s->buffer);
9099 void trace_init_global_iter(struct trace_iterator *iter)
9101 iter->tr = &global_trace;
9102 iter->trace = iter->tr->current_trace;
9103 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9104 iter->array_buffer = &global_trace.array_buffer;
9106 if (iter->trace && iter->trace->open)
9107 iter->trace->open(iter);
9109 /* Annotate start of buffers if we had overruns */
9110 if (ring_buffer_overruns(iter->array_buffer->buffer))
9111 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9113 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9114 if (trace_clocks[iter->tr->clock_id].in_ns)
9115 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9118 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9120 /* use static because iter can be a bit big for the stack */
9121 static struct trace_iterator iter;
9122 static atomic_t dump_running;
9123 struct trace_array *tr = &global_trace;
9124 unsigned int old_userobj;
9125 unsigned long flags;
9128 /* Only allow one dump user at a time. */
9129 if (atomic_inc_return(&dump_running) != 1) {
9130 atomic_dec(&dump_running);
9135 * Always turn off tracing when we dump.
9136 * We don't need to show trace output of what happens
9137 * between multiple crashes.
9139 * If the user does a sysrq-z, then they can re-enable
9140 * tracing with echo 1 > tracing_on.
9144 local_irq_save(flags);
9145 printk_nmi_direct_enter();
9147 /* Simulate the iterator */
9148 trace_init_global_iter(&iter);
9150 for_each_tracing_cpu(cpu) {
9151 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9154 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9156 /* don't look at user memory in panic mode */
9157 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9159 switch (oops_dump_mode) {
9161 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9164 iter.cpu_file = raw_smp_processor_id();
9169 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9170 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9173 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9175 /* Did function tracer already get disabled? */
9176 if (ftrace_is_dead()) {
9177 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9178 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9182 * We need to stop all tracing on all CPUs to read
9183 * the next buffer. This is a bit expensive, but is
9184 * not done often. We fill in all that we can read,
9185 * and then release the locks again.
9188 while (!trace_empty(&iter)) {
9191 printk(KERN_TRACE "---------------------------------\n");
9195 trace_iterator_reset(&iter);
9196 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9198 if (trace_find_next_entry_inc(&iter) != NULL) {
9201 ret = print_trace_line(&iter);
9202 if (ret != TRACE_TYPE_NO_CONSUME)
9203 trace_consume(&iter);
9205 touch_nmi_watchdog();
9207 trace_printk_seq(&iter.seq);
9211 printk(KERN_TRACE " (ftrace buffer empty)\n");
9213 printk(KERN_TRACE "---------------------------------\n");
9216 tr->trace_flags |= old_userobj;
9218 for_each_tracing_cpu(cpu) {
9219 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9221 atomic_dec(&dump_running);
9222 printk_nmi_direct_exit();
9223 local_irq_restore(flags);
9225 EXPORT_SYMBOL_GPL(ftrace_dump);
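/*
 * Illustrative only: debugging code that wants the trace buffers spilled
 * to the console can call this directly, e.g.
 *
 *   ftrace_dump(DUMP_ALL);     dump every CPU's buffer
 *   ftrace_dump(DUMP_ORIG);    dump only the CPU that triggered the dump
 *
 * which is what the panic and die notifiers above do when
 * ftrace_dump_on_oops is set.
 */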
9227 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9234 argv = argv_split(GFP_KERNEL, buf, &argc);
9239 ret = createfn(argc, argv);
9246 #define WRITE_BUFSIZE 4096
9248 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9249 size_t count, loff_t *ppos,
9250 int (*createfn)(int, char **))
9252 char *kbuf, *buf, *tmp;
9257 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9261 while (done < count) {
9262 size = count - done;
9264 if (size >= WRITE_BUFSIZE)
9265 size = WRITE_BUFSIZE - 1;
9267 if (copy_from_user(kbuf, buffer + done, size)) {
9274 tmp = strchr(buf, '\n');
9277 size = tmp - buf + 1;
9280 if (done + size < count) {
9283 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9284 pr_warn("Line length is too long: Should be less than %d\n",
9292 /* Remove comments */
9293 tmp = strchr(buf, '#');
9298 ret = trace_run_command(buf, createfn);
9303 } while (done < count);
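/*
 * Illustrative note: trace_parse_run_command() hands each newline-
 * terminated line of a write to @createfn, with anything after a '#'
 * stripped, so a single write containing
 *
 *   p:myprobe do_sys_open      # define a kprobe event
 *   p:myprobe2 do_sys_openat2
 *
 * results in two createfn() invocations, one per line. The probe
 * definitions here are only examples.
 */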
9313 __init static int tracer_alloc_buffers(void)
9319 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9320 pr_warn("Tracing disabled due to lockdown\n");
9325 * Make sure we don't accidentally add more trace options
9326 * than we have bits for.
9328 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9330 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9333 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9334 goto out_free_buffer_mask;
9336 /* Only allocate trace_printk buffers if a trace_printk exists */
9337 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9338 /* Must be called before global_trace.buffer is allocated */
9339 trace_printk_init_buffers();
9341 /* To save memory, keep the ring buffer size to its minimum */
9342 if (ring_buffer_expanded)
9343 ring_buf_size = trace_buf_size;
9347 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9348 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9350 raw_spin_lock_init(&global_trace.start_lock);
9353 * The prepare callback allocates some memory for the ring buffer. We
9354 * don't free the buffer if the CPU goes down. If we were to free
9355 * the buffer, then the user would lose any trace that was in the
9356 * buffer. The memory will be removed once the "instance" is removed.
9358 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9359 "trace/RB:prepare", trace_rb_cpu_prepare,
9362 goto out_free_cpumask;
9363 /* Used for event triggers */
9365 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9367 goto out_rm_hp_state;
9369 if (trace_create_savedcmd() < 0)
9370 goto out_free_temp_buffer;
9372 /* TODO: make the number of buffers hot pluggable with CPUs */
9373 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9374 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9375 goto out_free_savedcmd;
9378 if (global_trace.buffer_disabled)
9381 if (trace_boot_clock) {
9382 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9384 pr_warn("Trace clock %s not defined, going back to default\n",
9389 * register_tracer() might reference current_trace, so it
9390 * needs to be set before we register anything. This is
9391 * just a bootstrap of current_trace anyway.
9393 global_trace.current_trace = &nop_trace;
9395 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9397 ftrace_init_global_array_ops(&global_trace);
9399 init_trace_flags_index(&global_trace);
9401 register_tracer(&nop_trace);
9403 /* Function tracing may start here (via kernel command line) */
9404 init_function_trace();
9406 /* All seems OK, enable tracing */
9407 tracing_disabled = 0;
9409 atomic_notifier_chain_register(&panic_notifier_list,
9410 &trace_panic_notifier);
9412 register_die_notifier(&trace_die_notifier);
9414 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9416 INIT_LIST_HEAD(&global_trace.systems);
9417 INIT_LIST_HEAD(&global_trace.events);
9418 INIT_LIST_HEAD(&global_trace.hist_vars);
9419 INIT_LIST_HEAD(&global_trace.err_log);
9420 list_add(&global_trace.list, &ftrace_trace_arrays);
9422 apply_trace_boot_options();
9424 register_snapshot_cmd();
9429 free_saved_cmdlines_buffer(savedcmd);
9430 out_free_temp_buffer:
9431 ring_buffer_free(temp_buffer);
9433 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9435 free_cpumask_var(global_trace.tracing_cpumask);
9436 out_free_buffer_mask:
9437 free_cpumask_var(tracing_buffer_mask);
9442 void __init early_trace_init(void)
9444 if (tracepoint_printk) {
9445 tracepoint_print_iter =
9446 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9447 if (MEM_FAIL(!tracepoint_print_iter,
9448 "Failed to allocate trace iterator\n"))
9449 tracepoint_printk = 0;
9451 static_key_enable(&tracepoint_printk_key.key);
9453 tracer_alloc_buffers();
9456 void __init trace_init(void)
9461 __init static int clear_boot_tracer(void)
9464 * The default bootup tracer name lives in an init section, which is freed after boot.
9465 * This function is called at late init. If we did not
9466 * find the boot tracer, then clear it out, to prevent
9467 * later registration from accessing the buffer that is
9468 * about to be freed.
9470 if (!default_bootup_tracer)
9473 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9474 default_bootup_tracer);
9475 default_bootup_tracer = NULL;
9480 fs_initcall(tracer_init_tracefs);
9481 late_initcall_sync(clear_boot_tracer);
9483 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9484 __init static int tracing_set_default_clock(void)
9486 /* sched_clock_stable() is determined in late_initcall */
9487 if (!trace_boot_clock && !sched_clock_stable()) {
9488 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9489 pr_warn("Can not set tracing clock due to lockdown\n");
9494 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9495 "If you want to keep using the local clock, then add:\n"
9496 " \"trace_clock=local\"\n"
9497 "on the kernel command line\n");
9498 tracing_set_clock(&global_trace, "global");
9503 late_initcall_sync(tracing_set_default_clock);