1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring buffer to count the
64 * entries inserted during the selftest, although some concurrent
65 * insertions into the ring buffer, such as trace_printk(), could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If boot-time tracing including tracers/events via kernel cmdline
72 * is running, we do not want to run SELFTEST.
74 bool __read_mostly tracing_selftest_disabled;
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
86 /* Pipe tracepoints to printk */
87 struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
103 * To prevent the comm cache from being overwritten when no
104 * tracing is active, only save the comm when a trace event
107 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 * Kill all tracing for good (never come back).
111 * It is initialized to 1 but will turn to zero if the initialization
112 * of the tracer is successful. But that is the only place that sets
115 static int tracing_disabled = 1;
117 cpumask_var_t __read_mostly tracing_buffer_mask;
120 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123 * is set, then ftrace_dump is called. This will output the contents
124 * of the ftrace buffers to the console. This is very useful for
125 * capturing traces that lead to crashes and outputting them to a
128 * It is off by default, but you can enable it either by specifying
129 * "ftrace_dump_on_oops" on the kernel command line, or by setting
130 * /proc/sys/kernel/ftrace_dump_on_oops
131 * Set 1 if you want to dump buffers of all CPUs
132 * Set 2 if you want to dump the buffer of the CPU that triggered oops
135 enum ftrace_dump_mode ftrace_dump_on_oops;
137 /* When set, tracing will stop when a WARN*() is hit */
138 int __disable_trace_on_warning;
140 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
141 /* Map of enums to their values, for "eval_map" file */
142 struct trace_eval_map_head {
144 unsigned long length;
147 union trace_eval_map_item;
149 struct trace_eval_map_tail {
151 * "end" is first and points to NULL as it must be different
152 * from "mod" or "eval_string"
154 union trace_eval_map_item *next;
155 const char *end; /* points to NULL */
158 static DEFINE_MUTEX(trace_eval_mutex);
161 * The trace_eval_maps are saved in an array with two extra elements,
162 * one at the beginning, and one at the end. The beginning item contains
163 * the count of the saved maps (head.length), and the module they
164 * belong to if not built in (head.mod). The ending item contains a
165 * pointer to the next array of saved eval_map items.
167 union trace_eval_map_item {
168 struct trace_eval_map map;
169 struct trace_eval_map_head head;
170 struct trace_eval_map_tail tail;
173 static union trace_eval_map_item *trace_eval_maps;
174 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
178 struct trace_buffer *buffer,
179 unsigned int trace_ctx);
181 #define MAX_TRACER_SIZE 100
182 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183 static char *default_bootup_tracer;
185 static bool allocate_snapshot;
187 static int __init set_cmdline_ftrace(char *str)
189 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
190 default_bootup_tracer = bootup_tracer_buf;
191 /* We are using ftrace early, expand it */
192 ring_buffer_expanded = true;
195 __setup("ftrace=", set_cmdline_ftrace);
197 static int __init set_ftrace_dump_on_oops(char *str)
199 if (*str++ != '=' || !*str) {
200 ftrace_dump_on_oops = DUMP_ALL;
204 if (!strcmp("orig_cpu", str)) {
205 ftrace_dump_on_oops = DUMP_ORIG;
211 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
213 static int __init stop_trace_on_warning(char *str)
215 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
216 __disable_trace_on_warning = 1;
219 __setup("traceoff_on_warning", stop_trace_on_warning);
221 static int __init boot_alloc_snapshot(char *str)
223 allocate_snapshot = true;
224 /* We also need the main ring buffer expanded */
225 ring_buffer_expanded = true;
228 __setup("alloc_snapshot", boot_alloc_snapshot);
231 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
233 static int __init set_trace_boot_options(char *str)
235 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
238 __setup("trace_options=", set_trace_boot_options);
240 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
241 static char *trace_boot_clock __initdata;
243 static int __init set_trace_boot_clock(char *str)
245 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
246 trace_boot_clock = trace_boot_clock_buf;
249 __setup("trace_clock=", set_trace_boot_clock);
251 static int __init set_tracepoint_printk(char *str)
253 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
254 tracepoint_printk = 1;
257 __setup("tp_printk", set_tracepoint_printk);
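/*
 * Example (illustrative only): the __setup() handlers above let tracing be
 * configured from the kernel command line before user space is up. Assuming
 * the named tracer is built in, a command line could contain, e.g.:
 *
 *	ftrace=function_graph trace_options=sym-addr trace_clock=global
 *	ftrace_dump_on_oops tp_printk
 *
 * The strings accepted by trace_options= and trace_clock= match the ones
 * shown in the tracefs "trace_options" and "trace_clock" files.
 */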
259 unsigned long long ns2usecs(u64 nsec)
267 trace_process_export(struct trace_export *export,
268 struct ring_buffer_event *event, int flag)
270 struct trace_entry *entry;
271 unsigned int size = 0;
273 if (export->flags & flag) {
274 entry = ring_buffer_event_data(event);
275 size = ring_buffer_event_length(event);
276 export->write(export, entry, size);
280 static DEFINE_MUTEX(ftrace_export_lock);
282 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
284 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
285 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
286 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
288 static inline void ftrace_exports_enable(struct trace_export *export)
290 if (export->flags & TRACE_EXPORT_FUNCTION)
291 static_branch_inc(&trace_function_exports_enabled);
293 if (export->flags & TRACE_EXPORT_EVENT)
294 static_branch_inc(&trace_event_exports_enabled);
296 if (export->flags & TRACE_EXPORT_MARKER)
297 static_branch_inc(&trace_marker_exports_enabled);
300 static inline void ftrace_exports_disable(struct trace_export *export)
302 if (export->flags & TRACE_EXPORT_FUNCTION)
303 static_branch_dec(&trace_function_exports_enabled);
305 if (export->flags & TRACE_EXPORT_EVENT)
306 static_branch_dec(&trace_event_exports_enabled);
308 if (export->flags & TRACE_EXPORT_MARKER)
309 static_branch_dec(&trace_marker_exports_enabled);
312 static void ftrace_exports(struct ring_buffer_event *event, int flag)
314 struct trace_export *export;
316 preempt_disable_notrace();
318 export = rcu_dereference_raw_check(ftrace_exports_list);
320 trace_process_export(export, event, flag);
321 export = rcu_dereference_raw_check(export->next);
324 preempt_enable_notrace();
328 add_trace_export(struct trace_export **list, struct trace_export *export)
330 rcu_assign_pointer(export->next, *list);
332 * We are adding the export to the list, but another
333 * CPU might be walking that list. We need to make sure
334 * the export->next pointer is valid before another CPU sees
335 * the export pointer included in the list.
337 rcu_assign_pointer(*list, export);
341 rm_trace_export(struct trace_export **list, struct trace_export *export)
343 struct trace_export **p;
345 for (p = list; *p != NULL; p = &(*p)->next)
352 rcu_assign_pointer(*p, (*p)->next);
358 add_ftrace_export(struct trace_export **list, struct trace_export *export)
360 ftrace_exports_enable(export);
362 add_trace_export(list, export);
366 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
370 ret = rm_trace_export(list, export);
371 ftrace_exports_disable(export);
376 int register_ftrace_export(struct trace_export *export)
378 if (WARN_ON_ONCE(!export->write))
381 mutex_lock(&ftrace_export_lock);
383 add_ftrace_export(&ftrace_exports_list, export);
385 mutex_unlock(&ftrace_export_lock);
389 EXPORT_SYMBOL_GPL(register_ftrace_export);
391 int unregister_ftrace_export(struct trace_export *export)
395 mutex_lock(&ftrace_export_lock);
397 ret = rm_ftrace_export(&ftrace_exports_list, export);
399 mutex_unlock(&ftrace_export_lock);
403 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
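/*
 * Example (sketch, not part of the original code): a module can attach its
 * own sink for trace data by filling in a struct trace_export and
 * registering it. The callback name and body below are illustrative.
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// forward the binary trace entry to an out-of-band channel
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */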
405 /* trace_flags holds trace_options default values */
406 #define TRACE_DEFAULT_FLAGS \
407 (FUNCTION_DEFAULT_FLAGS | \
408 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
409 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
410 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
411 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
414 /* trace_options that are only supported by global_trace */
415 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
416 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
418 /* trace_flags that are default zero for instances */
419 #define ZEROED_TRACE_FLAGS \
420 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
423 * The global_trace is the descriptor that holds the top-level tracing
424 * buffers for the live tracing.
426 static struct trace_array global_trace = {
427 .trace_flags = TRACE_DEFAULT_FLAGS,
430 LIST_HEAD(ftrace_trace_arrays);
432 int trace_array_get(struct trace_array *this_tr)
434 struct trace_array *tr;
437 mutex_lock(&trace_types_lock);
438 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
445 mutex_unlock(&trace_types_lock);
450 static void __trace_array_put(struct trace_array *this_tr)
452 WARN_ON(!this_tr->ref);
457 * trace_array_put - Decrement the reference counter for this trace array.
458 * @this_tr : pointer to the trace array
460 * NOTE: Use this when we no longer need the trace array returned by
461 * trace_array_get_by_name(). This ensures the trace array can be later
465 void trace_array_put(struct trace_array *this_tr)
470 mutex_lock(&trace_types_lock);
471 __trace_array_put(this_tr);
472 mutex_unlock(&trace_types_lock);
474 EXPORT_SYMBOL_GPL(trace_array_put);
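/*
 * Example (sketch): users of the instance API pair the lookup with
 * trace_array_put() once the array is no longer needed. The instance
 * name here is hypothetical.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENODEV;
 *	trace_array_printk(tr, _THIS_IP_, "instance is alive\n");
 *	trace_array_put(tr);
 */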
476 int tracing_check_open_get_tr(struct trace_array *tr)
480 ret = security_locked_down(LOCKDOWN_TRACEFS);
484 if (tracing_disabled)
487 if (tr && trace_array_get(tr) < 0)
493 int call_filter_check_discard(struct trace_event_call *call, void *rec,
494 struct trace_buffer *buffer,
495 struct ring_buffer_event *event)
497 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
498 !filter_match_preds(call->filter, rec)) {
499 __trace_event_discard_commit(buffer, event);
506 void trace_free_pid_list(struct trace_pid_list *pid_list)
508 vfree(pid_list->pids);
513 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
514 * @filtered_pids: The list of pids to check
515 * @search_pid: The PID to find in @filtered_pids
517 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
520 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
523 * If pid_max changed after filtered_pids was created, we
524 * by default ignore all pids greater than the previous pid_max.
526 if (search_pid >= filtered_pids->pid_max)
529 return test_bit(search_pid, filtered_pids->pids);
533 * trace_ignore_this_task - should a task be ignored for tracing
534 * @filtered_pids: The list of pids to check
535 * @filtered_no_pids: The list of pids not to be traced
536 * @task: The task that should be ignored if not filtered
538 * Checks if @task should be traced or not from @filtered_pids.
539 * Returns true if @task should *NOT* be traced.
540 * Returns false if @task should be traced.
543 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
544 struct trace_pid_list *filtered_no_pids,
545 struct task_struct *task)
548 * If filtered_no_pids is not empty, and the task's pid is listed
549 * in filtered_no_pids, then return true.
550 * Otherwise, if filtered_pids is empty, that means we can
551 * trace all tasks. If it has content, then only trace pids
552 * within filtered_pids.
555 return (filtered_pids &&
556 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
558 trace_find_filtered_pid(filtered_no_pids, task->pid));
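/*
 * For example (illustrative): with filtered_pids = { 42 } and
 * filtered_no_pids empty, only pid 42 is traced; with filtered_pids empty
 * and filtered_no_pids = { 42 }, every task except pid 42 is traced; with
 * both lists NULL, no task is ignored.
 */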
562 * trace_filter_add_remove_task - Add or remove a task from a pid_list
563 * @pid_list: The list to modify
564 * @self: The current task for fork or NULL for exit
565 * @task: The task to add or remove
567 * When adding a task, if @self is defined, the task is only added if @self
568 * is also included in @pid_list. This happens on fork and tasks should
569 * only be added when the parent is listed. If @self is NULL, then the
570 * @task pid will be removed from the list, which would happen on exit
573 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
574 struct task_struct *self,
575 struct task_struct *task)
580 /* For forks, we only add if the forking task is listed */
582 if (!trace_find_filtered_pid(pid_list, self->pid))
586 /* Sorry, but we don't support pid_max changing after setting */
587 if (task->pid >= pid_list->pid_max)
590 /* "self" is set for forks, and NULL for exits */
592 set_bit(task->pid, pid_list->pids);
594 clear_bit(task->pid, pid_list->pids);
598 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
599 * @pid_list: The pid list to show
600 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
601 * @pos: The position of the file
603 * This is used by the seq_file "next" operation to iterate the pids
604 * listed in a trace_pid_list structure.
606 * Returns the pid+1 as we want to display pid of zero, but NULL would
607 * stop the iteration.
609 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
611 unsigned long pid = (unsigned long)v;
615 /* pid is already +1 of the actual previous bit */
616 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
618 /* Return pid + 1 to allow zero to be represented */
619 if (pid < pid_list->pid_max)
620 return (void *)(pid + 1);
626 * trace_pid_start - Used for seq_file to start reading pid lists
627 * @pid_list: The pid list to show
628 * @pos: The position of the file
630 * This is used by seq_file "start" operation to start the iteration
633 * Returns the pid+1 as we want to display pid of zero, but NULL would
634 * stop the iteration.
636 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
641 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
642 if (pid >= pid_list->pid_max)
645 /* Return pid + 1 so that zero can be the exit value */
646 for (pid++; pid && l < *pos;
647 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
653 * trace_pid_show - show the current pid in seq_file processing
654 * @m: The seq_file structure to write into
655 * @v: A void pointer of the pid (+1) value to display
657 * Can be directly used by seq_file operations to display the current
660 int trace_pid_show(struct seq_file *m, void *v)
662 unsigned long pid = (unsigned long)v - 1;
664 seq_printf(m, "%lu\n", pid);
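/*
 * Example (sketch): these helpers are meant to back a seq_file interface
 * for a pid list. The pid_list lookup and the "stop" callback are elided
 * and hypothetical.
 *
 *	static void *my_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_pids_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pids_seq_ops = {
 *		.start	= my_pids_start,
 *		.next	= my_pids_next,
 *		.stop	= my_pids_stop,
 *		.show	= trace_pid_show,
 *	};
 */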
668 /* 128 should be much more than enough */
669 #define PID_BUF_SIZE 127
671 int trace_pid_write(struct trace_pid_list *filtered_pids,
672 struct trace_pid_list **new_pid_list,
673 const char __user *ubuf, size_t cnt)
675 struct trace_pid_list *pid_list;
676 struct trace_parser parser;
684 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
688 * Always recreate a new array. The write is an all or nothing
689 * operation. Always create a new array when adding new pids by
690 * the user. If the operation fails, then the current list is
693 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
695 trace_parser_put(&parser);
699 pid_list->pid_max = READ_ONCE(pid_max);
701 /* Only truncating will shrink pid_max */
702 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
703 pid_list->pid_max = filtered_pids->pid_max;
705 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
706 if (!pid_list->pids) {
707 trace_parser_put(&parser);
713 /* copy the current bits to the new max */
714 for_each_set_bit(pid, filtered_pids->pids,
715 filtered_pids->pid_max) {
716 set_bit(pid, pid_list->pids);
725 ret = trace_get_user(&parser, ubuf, cnt, &pos);
726 if (ret < 0 || !trace_parser_loaded(&parser))
734 if (kstrtoul(parser.buffer, 0, &val))
736 if (val >= pid_list->pid_max)
741 set_bit(pid, pid_list->pids);
744 trace_parser_clear(&parser);
747 trace_parser_put(&parser);
750 trace_free_pid_list(pid_list);
755 /* Cleared the list of pids */
756 trace_free_pid_list(pid_list);
761 *new_pid_list = pid_list;
766 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
770 /* Early boot up does not have a buffer yet */
772 return trace_clock_local();
774 ts = ring_buffer_time_stamp(buf->buffer, cpu);
775 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
780 u64 ftrace_now(int cpu)
782 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
786 * tracing_is_enabled - Show if global_trace has been enabled
788 * Shows if the global trace has been enabled or not. It uses the
789 * mirror flag "buffer_disabled" so that it can be used in fast paths, such as by
790 * the irqsoff tracer. But it may be inaccurate due to races. If you
791 * need to know the accurate state, use tracing_is_on(), which is a little
792 * slower, but accurate.
794 int tracing_is_enabled(void)
797 * For quick access (irqsoff uses this in fast path), just
798 * return the mirror variable of the state of the ring buffer.
799 * It's a little racy, but we don't really care.
802 return !global_trace.buffer_disabled;
806 * trace_buf_size is the size in bytes that is allocated
807 * for a buffer. Note, the number of bytes is always rounded
810 * This number is purposely set to the low value of 16384 so that,
811 * if a dump on oops happens, we do not have to wait for an
812 * excessive amount of output. In any case, this is configurable
813 * at both boot time and run time.
815 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
817 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
819 /* trace_types holds a linked list of available tracers. */
820 static struct tracer *trace_types __read_mostly;
823 * trace_types_lock is used to protect the trace_types list.
825 DEFINE_MUTEX(trace_types_lock);
828 * serialize the access of the ring buffer
830 * The ring buffer serializes readers, but that is only low-level protection.
831 * The validity of the events (returned by ring_buffer_peek() etc.)
832 * is not protected by the ring buffer.
834 * The content of events may become garbage if we allow another process to
835 * consume these events concurrently:
836 * A) the page of the consumed events may become a normal page
837 * (not a reader page) in the ring buffer, and this page will be rewritten
838 * by the events producer.
839 * B) The page of the consumed events may become a page for splice_read,
840 * and this page will be returned to the system.
842 * These primitives allow multi-process access to different cpu ring buffers
845 * These primitives don't distinguish read-only and read-consume access.
846 * Multiple read-only accesses are also serialized.
850 static DECLARE_RWSEM(all_cpu_access_lock);
851 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
853 static inline void trace_access_lock(int cpu)
855 if (cpu == RING_BUFFER_ALL_CPUS) {
856 /* gain it for accessing the whole ring buffer. */
857 down_write(&all_cpu_access_lock);
859 /* gain it for accessing a cpu ring buffer. */
861 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
862 down_read(&all_cpu_access_lock);
864 /* Secondly block other access to this @cpu ring buffer. */
865 mutex_lock(&per_cpu(cpu_access_lock, cpu));
869 static inline void trace_access_unlock(int cpu)
871 if (cpu == RING_BUFFER_ALL_CPUS) {
872 up_write(&all_cpu_access_lock);
874 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
875 up_read(&all_cpu_access_lock);
879 static inline void trace_access_lock_init(void)
883 for_each_possible_cpu(cpu)
884 mutex_init(&per_cpu(cpu_access_lock, cpu));
889 static DEFINE_MUTEX(access_lock);
891 static inline void trace_access_lock(int cpu)
894 mutex_lock(&access_lock);
897 static inline void trace_access_unlock(int cpu)
900 mutex_unlock(&access_lock);
903 static inline void trace_access_lock_init(void)
909 #ifdef CONFIG_STACKTRACE
910 static void __ftrace_trace_stack(struct trace_buffer *buffer,
911 unsigned int trace_ctx,
912 int skip, struct pt_regs *regs);
913 static inline void ftrace_trace_stack(struct trace_array *tr,
914 struct trace_buffer *buffer,
915 unsigned int trace_ctx,
916 int skip, struct pt_regs *regs);
919 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
920 unsigned int trace_ctx,
921 int skip, struct pt_regs *regs)
924 static inline void ftrace_trace_stack(struct trace_array *tr,
925 struct trace_buffer *buffer,
926 unsigned long trace_ctx,
927 int skip, struct pt_regs *regs)
933 static __always_inline void
934 trace_event_setup(struct ring_buffer_event *event,
935 int type, unsigned int trace_ctx)
937 struct trace_entry *ent = ring_buffer_event_data(event);
939 tracing_generic_entry_update(ent, type, trace_ctx);
942 static __always_inline struct ring_buffer_event *
943 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
946 unsigned int trace_ctx)
948 struct ring_buffer_event *event;
950 event = ring_buffer_lock_reserve(buffer, len);
952 trace_event_setup(event, type, trace_ctx);
957 void tracer_tracing_on(struct trace_array *tr)
959 if (tr->array_buffer.buffer)
960 ring_buffer_record_on(tr->array_buffer.buffer);
962 * This flag is looked at when buffers haven't been allocated
963 * yet, or by some tracers (like irqsoff) that just want to
964 * know if the ring buffer has been disabled, but can handle
965 * races where it gets disabled while we still do a record.
966 * As the check is in the fast path of the tracers, it is more
967 * important to be fast than accurate.
969 tr->buffer_disabled = 0;
970 /* Make the flag seen by readers */
975 * tracing_on - enable tracing buffers
977 * This function enables tracing buffers that may have been
978 * disabled with tracing_off.
980 void tracing_on(void)
982 tracer_tracing_on(&global_trace);
984 EXPORT_SYMBOL_GPL(tracing_on);
987 static __always_inline void
988 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
990 __this_cpu_write(trace_taskinfo_save, true);
992 /* If this is the temp buffer, we need to commit fully */
993 if (this_cpu_read(trace_buffered_event) == event) {
994 /* Length is in event->array[0] */
995 ring_buffer_write(buffer, event->array[0], &event->array[1]);
996 /* Release the temp buffer */
997 this_cpu_dec(trace_buffered_event_cnt);
999 ring_buffer_unlock_commit(buffer, event);
1003 * __trace_puts - write a constant string into the trace buffer.
1004 * @ip: The address of the caller
1005 * @str: The constant string to write
1006 * @size: The size of the string.
1008 int __trace_puts(unsigned long ip, const char *str, int size)
1010 struct ring_buffer_event *event;
1011 struct trace_buffer *buffer;
1012 struct print_entry *entry;
1013 unsigned int trace_ctx;
1016 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1019 if (unlikely(tracing_selftest_running || tracing_disabled))
1022 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1024 trace_ctx = tracing_gen_ctx();
1025 buffer = global_trace.array_buffer.buffer;
1026 ring_buffer_nest_start(buffer);
1027 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1034 entry = ring_buffer_event_data(event);
1037 memcpy(&entry->buf, str, size);
1039 /* Add a newline if necessary */
1040 if (entry->buf[size - 1] != '\n') {
1041 entry->buf[size] = '\n';
1042 entry->buf[size + 1] = '\0';
1044 entry->buf[size] = '\0';
1046 __buffer_unlock_commit(buffer, event);
1047 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1049 ring_buffer_nest_end(buffer);
1052 EXPORT_SYMBOL_GPL(__trace_puts);
1055 * __trace_bputs - write the pointer to a constant string into trace buffer
1056 * @ip: The address of the caller
1057 * @str: The constant string to write to the buffer to
1059 int __trace_bputs(unsigned long ip, const char *str)
1061 struct ring_buffer_event *event;
1062 struct trace_buffer *buffer;
1063 struct bputs_entry *entry;
1064 unsigned int trace_ctx;
1065 int size = sizeof(struct bputs_entry);
1068 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1071 if (unlikely(tracing_selftest_running || tracing_disabled))
1074 trace_ctx = tracing_gen_ctx();
1075 buffer = global_trace.array_buffer.buffer;
1077 ring_buffer_nest_start(buffer);
1078 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1083 entry = ring_buffer_event_data(event);
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1092 ring_buffer_nest_end(buffer);
1095 EXPORT_SYMBOL_GPL(__trace_bputs);
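/*
 * Example (sketch): callers normally use the trace_puts() macro rather than
 * calling __trace_puts()/__trace_bputs() directly; for a constant string the
 * macro records only the pointer via __trace_bputs(), otherwise it copies
 * the string via __trace_puts():
 *
 *	trace_puts("reached the slow path\n");
 */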
1097 #ifdef CONFIG_TRACER_SNAPSHOT
1098 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1101 struct tracer *tracer = tr->current_trace;
1102 unsigned long flags;
1105 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1106 internal_trace_puts("*** snapshot is being ignored ***\n");
1110 if (!tr->allocated_snapshot) {
1111 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1112 internal_trace_puts("*** stopping trace here! ***\n");
1117 /* Note, snapshot can not be used when the tracer uses it */
1118 if (tracer->use_max_tr) {
1119 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1120 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1124 local_irq_save(flags);
1125 update_max_tr(tr, current, smp_processor_id(), cond_data);
1126 local_irq_restore(flags);
1129 void tracing_snapshot_instance(struct trace_array *tr)
1131 tracing_snapshot_instance_cond(tr, NULL);
1135 * tracing_snapshot - take a snapshot of the current buffer.
1137 * This causes a swap between the snapshot buffer and the current live
1138 * tracing buffer. You can use this to take snapshots of the live
1139 * trace when some condition is triggered, but continue to trace.
1141 * Note, make sure to allocate the snapshot with either
1142 * a tracing_snapshot_alloc(), or by doing it manually
1143 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1145 * If the snapshot buffer is not allocated, it will stop tracing.
1146 * Basically making a permanent snapshot.
1148 void tracing_snapshot(void)
1150 struct trace_array *tr = &global_trace;
1152 tracing_snapshot_instance(tr);
1154 EXPORT_SYMBOL_GPL(tracing_snapshot);
1157 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1158 * @tr: The tracing instance to snapshot
1159 * @cond_data: The data to be tested conditionally, and possibly saved
1161 * This is the same as tracing_snapshot() except that the snapshot is
1162 * conditional - the snapshot will only happen if the
1163 * cond_snapshot.update() implementation receiving the cond_data
1164 * returns true, which means that the trace array's cond_snapshot
1165 * update() operation used the cond_data to determine whether the
1166 * snapshot should be taken, and if it was, presumably saved it along
1167 * with the snapshot.
1169 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1171 tracing_snapshot_instance_cond(tr, cond_data);
1173 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1176 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1177 * @tr: The tracing instance
1179 * When the user enables a conditional snapshot using
1180 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1181 * with the snapshot. This accessor is used to retrieve it.
1183 * Should not be called from cond_snapshot.update(), since it takes
1184 * the tr->max_lock lock, which the code calling
1185 * cond_snapshot.update() has already done.
1187 * Returns the cond_data associated with the trace array's snapshot.
1189 void *tracing_cond_snapshot_data(struct trace_array *tr)
1191 void *cond_data = NULL;
1193 arch_spin_lock(&tr->max_lock);
1195 if (tr->cond_snapshot)
1196 cond_data = tr->cond_snapshot->cond_data;
1198 arch_spin_unlock(&tr->max_lock);
1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1204 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1205 struct array_buffer *size_buf, int cpu_id);
1206 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1208 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1212 if (!tr->allocated_snapshot) {
1214 /* allocate spare buffer */
1215 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1216 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1220 tr->allocated_snapshot = true;
1226 static void free_snapshot(struct trace_array *tr)
1229 * We don't free the ring buffer; instead, we resize it because
1230 * the max_tr ring buffer has some state (e.g. ring->clock) and
1231 * we want to preserve it.
1233 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1234 set_buffer_entries(&tr->max_buffer, 1);
1235 tracing_reset_online_cpus(&tr->max_buffer);
1236 tr->allocated_snapshot = false;
1240 * tracing_alloc_snapshot - allocate snapshot buffer.
1242 * This only allocates the snapshot buffer if it isn't already
1243 * allocated - it doesn't also take a snapshot.
1245 * This is meant to be used in cases where the snapshot buffer needs
1246 * to be set up for events that can't sleep but need to be able to
1247 * trigger a snapshot.
1249 int tracing_alloc_snapshot(void)
1251 struct trace_array *tr = &global_trace;
1254 ret = tracing_alloc_snapshot_instance(tr);
1259 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1262 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1264 * This is similar to tracing_snapshot(), but it will allocate the
1265 * snapshot buffer if it isn't already allocated. Use this only
1266 * where it is safe to sleep, as the allocation may sleep.
1268 * This causes a swap between the snapshot buffer and the current live
1269 * tracing buffer. You can use this to take snapshots of the live
1270 * trace when some condition is triggered, but continue to trace.
1272 void tracing_snapshot_alloc(void)
1276 ret = tracing_alloc_snapshot();
1282 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
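/*
 * Example (sketch): a debugging module could allocate the snapshot buffer
 * once and then freeze the interesting trace data whenever its condition
 * fires. my_error_detected() is hypothetical.
 *
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *	...
 *	if (my_error_detected())
 *		tracing_snapshot();
 *
 * The frozen data can then be read from the tracefs "snapshot" file.
 */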
1285 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1286 * @tr: The tracing instance
1287 * @cond_data: User data to associate with the snapshot
1288 * @update: Implementation of the cond_snapshot update function
1290 * Check whether the conditional snapshot for the given instance has
1291 * already been enabled, or if the current tracer is already using a
1292 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1293 * save the cond_data and update function inside.
1295 * Returns 0 if successful, error otherwise.
1297 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1298 cond_update_fn_t update)
1300 struct cond_snapshot *cond_snapshot;
1303 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1307 cond_snapshot->cond_data = cond_data;
1308 cond_snapshot->update = update;
1310 mutex_lock(&trace_types_lock);
1312 ret = tracing_alloc_snapshot_instance(tr);
1316 if (tr->current_trace->use_max_tr) {
1322 * The cond_snapshot can only change to NULL without the
1323 * trace_types_lock. We don't care if we race with it going
1324 * to NULL, but we want to make sure that it's not set to
1325 * something other than NULL when we get here, which we can
1326 * do safely with only holding the trace_types_lock and not
1327 * having to take the max_lock.
1329 if (tr->cond_snapshot) {
1334 arch_spin_lock(&tr->max_lock);
1335 tr->cond_snapshot = cond_snapshot;
1336 arch_spin_unlock(&tr->max_lock);
1338 mutex_unlock(&trace_types_lock);
1343 mutex_unlock(&trace_types_lock);
1344 kfree(cond_snapshot);
1347 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1350 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1351 * @tr: The tracing instance
1353 * Check whether the conditional snapshot for the given instance is
1354 * enabled; if so, free the cond_snapshot associated with it,
1355 * otherwise return -EINVAL.
1357 * Returns 0 if successful, error otherwise.
1359 int tracing_snapshot_cond_disable(struct trace_array *tr)
1363 arch_spin_lock(&tr->max_lock);
1365 if (!tr->cond_snapshot)
1368 kfree(tr->cond_snapshot);
1369 tr->cond_snapshot = NULL;
1372 arch_spin_unlock(&tr->max_lock);
1376 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
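/*
 * Example (sketch): a hypothetical user of the conditional snapshot API.
 * The update callback decides, using the cond_data passed at the call
 * site, whether the buffer swap actually happens.
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *c = cond_data;
 *
 *		return c->hits > c->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_cond_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_cond_data);
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */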
1378 void tracing_snapshot(void)
1380 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1382 EXPORT_SYMBOL_GPL(tracing_snapshot);
1383 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1385 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1387 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1388 int tracing_alloc_snapshot(void)
1390 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1393 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1394 void tracing_snapshot_alloc(void)
1399 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1400 void *tracing_cond_snapshot_data(struct trace_array *tr)
1404 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1405 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1409 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1410 int tracing_snapshot_cond_disable(struct trace_array *tr)
1414 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1415 #endif /* CONFIG_TRACER_SNAPSHOT */
1417 void tracer_tracing_off(struct trace_array *tr)
1419 if (tr->array_buffer.buffer)
1420 ring_buffer_record_off(tr->array_buffer.buffer);
1422 * This flag is looked at when buffers haven't been allocated
1423 * yet, or by some tracers (like irqsoff) that just want to
1424 * know if the ring buffer has been disabled, but can handle
1425 * races where it gets disabled while we still do a record.
1426 * As the check is in the fast path of the tracers, it is more
1427 * important to be fast than accurate.
1429 tr->buffer_disabled = 1;
1430 /* Make the flag seen by readers */
1435 * tracing_off - turn off tracing buffers
1437 * This function stops the tracing buffers from recording data.
1438 * It does not disable any overhead the tracers themselves may
1439 * be causing. This function simply causes all recording to
1440 * the ring buffers to fail.
1442 void tracing_off(void)
1444 tracer_tracing_off(&global_trace);
1446 EXPORT_SYMBOL_GPL(tracing_off);
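/*
 * Example (sketch): tracing_off() is handy for freezing the ring buffer
 * right after a suspect event, so the trace leading up to it is preserved.
 * The condition below is hypothetical.
 *
 *	if (data_looks_corrupted(obj)) {
 *		trace_printk("corruption detected\n");
 *		tracing_off();
 *	}
 *
 * Tracing can be re-enabled later with tracing_on() or by writing 1 to the
 * tracefs "tracing_on" file.
 */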
1448 void disable_trace_on_warning(void)
1450 if (__disable_trace_on_warning) {
1451 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1452 "Disabling tracing due to warning\n");
1458 * tracer_tracing_is_on - show the real state of the ring buffer
1459 * @tr : the trace array to check
1461 * Shows the real state of the ring buffer: whether it is enabled or not.
1463 bool tracer_tracing_is_on(struct trace_array *tr)
1465 if (tr->array_buffer.buffer)
1466 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1467 return !tr->buffer_disabled;
1471 * tracing_is_on - show state of ring buffers enabled
1473 int tracing_is_on(void)
1475 return tracer_tracing_is_on(&global_trace);
1477 EXPORT_SYMBOL_GPL(tracing_is_on);
1479 static int __init set_buf_size(char *str)
1481 unsigned long buf_size;
1485 buf_size = memparse(str, &str);
1486 /* nr_entries can not be zero */
1489 trace_buf_size = buf_size;
1492 __setup("trace_buf_size=", set_buf_size);
1494 static int __init set_tracing_thresh(char *str)
1496 unsigned long threshold;
1501 ret = kstrtoul(str, 0, &threshold);
1504 tracing_thresh = threshold * 1000;
1507 __setup("tracing_thresh=", set_tracing_thresh);
1509 unsigned long nsecs_to_usecs(unsigned long nsecs)
1511 return nsecs / 1000;
1515 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1516 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1517 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1518 * of strings in the order that the evals (enum) were defined.
1523 /* These must match the bit positions in trace_iterator_flags */
1524 static const char *trace_options[] = {
1532 int in_ns; /* is this clock in nanoseconds? */
1533 } trace_clocks[] = {
1534 { trace_clock_local, "local", 1 },
1535 { trace_clock_global, "global", 1 },
1536 { trace_clock_counter, "counter", 0 },
1537 { trace_clock_jiffies, "uptime", 0 },
1538 { trace_clock, "perf", 1 },
1539 { ktime_get_mono_fast_ns, "mono", 1 },
1540 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1541 { ktime_get_boot_fast_ns, "boot", 1 },
1545 bool trace_clock_in_ns(struct trace_array *tr)
1547 if (trace_clocks[tr->clock_id].in_ns)
1554 * trace_parser_get_init - gets the buffer for trace parser
1556 int trace_parser_get_init(struct trace_parser *parser, int size)
1558 memset(parser, 0, sizeof(*parser));
1560 parser->buffer = kmalloc(size, GFP_KERNEL);
1561 if (!parser->buffer)
1564 parser->size = size;
1569 * trace_parser_put - frees the buffer for trace parser
1571 void trace_parser_put(struct trace_parser *parser)
1573 kfree(parser->buffer);
1574 parser->buffer = NULL;
1578 * trace_get_user - reads the user input string separated by space
1579 * (matched by isspace(ch))
1581 * For each string found the 'struct trace_parser' is updated,
1582 * and the function returns.
1584 * Returns number of bytes read.
1586 * See kernel/trace/trace.h for 'struct trace_parser' details.
1588 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1589 size_t cnt, loff_t *ppos)
1596 trace_parser_clear(parser);
1598 ret = get_user(ch, ubuf++);
1606 * If the parser has not finished with the last write,
1607 * continue reading the user input without skipping spaces.
1609 if (!parser->cont) {
1610 /* skip white space */
1611 while (cnt && isspace(ch)) {
1612 ret = get_user(ch, ubuf++);
1621 /* only spaces were written */
1622 if (isspace(ch) || !ch) {
1629 /* read the non-space input */
1630 while (cnt && !isspace(ch) && ch) {
1631 if (parser->idx < parser->size - 1)
1632 parser->buffer[parser->idx++] = ch;
1637 ret = get_user(ch, ubuf++);
1644 /* We either got finished input or we have to wait for another call. */
1645 if (isspace(ch) || !ch) {
1646 parser->buffer[parser->idx] = 0;
1647 parser->cont = false;
1648 } else if (parser->idx < parser->size - 1) {
1649 parser->cont = true;
1650 parser->buffer[parser->idx++] = ch;
1651 /* Make sure the parsed string always terminates with '\0'. */
1652 parser->buffer[parser->idx] = 0;
1665 /* TODO add a seq_buf_to_buffer() */
1666 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1670 if (trace_seq_used(s) <= s->seq.readpos)
1673 len = trace_seq_used(s) - s->seq.readpos;
1676 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1678 s->seq.readpos += cnt;
1682 unsigned long __read_mostly tracing_thresh;
1683 static const struct file_operations tracing_max_lat_fops;
1685 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1686 defined(CONFIG_FSNOTIFY)
1688 static struct workqueue_struct *fsnotify_wq;
1690 static void latency_fsnotify_workfn(struct work_struct *work)
1692 struct trace_array *tr = container_of(work, struct trace_array,
1694 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1697 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1699 struct trace_array *tr = container_of(iwork, struct trace_array,
1701 queue_work(fsnotify_wq, &tr->fsnotify_work);
1704 static void trace_create_maxlat_file(struct trace_array *tr,
1705 struct dentry *d_tracer)
1707 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1708 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1709 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1710 d_tracer, &tr->max_latency,
1711 &tracing_max_lat_fops);
1714 __init static int latency_fsnotify_init(void)
1716 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1717 WQ_UNBOUND | WQ_HIGHPRI, 0);
1719 pr_err("Unable to allocate tr_max_lat_wq\n");
1725 late_initcall_sync(latency_fsnotify_init);
1727 void latency_fsnotify(struct trace_array *tr)
1732 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1733 * possible that we are called from __schedule() or do_idle(), which
1734 * could cause a deadlock.
1736 irq_work_queue(&tr->fsnotify_irqwork);
1740 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1741 * defined(CONFIG_FSNOTIFY)
1745 #define trace_create_maxlat_file(tr, d_tracer) \
1746 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1747 &tr->max_latency, &tracing_max_lat_fops)
1751 #ifdef CONFIG_TRACER_MAX_TRACE
1753 * Copy the new maximum trace into the separate maximum-trace
1754 * structure. (this way the maximum trace is permanently saved,
1755 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1758 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1760 struct array_buffer *trace_buf = &tr->array_buffer;
1761 struct array_buffer *max_buf = &tr->max_buffer;
1762 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1763 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1766 max_buf->time_start = data->preempt_timestamp;
1768 max_data->saved_latency = tr->max_latency;
1769 max_data->critical_start = data->critical_start;
1770 max_data->critical_end = data->critical_end;
1772 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1773 max_data->pid = tsk->pid;
1775 * If tsk == current, then use current_uid(), as that does not use
1776 * RCU. The irq tracer can be called out of RCU scope.
1779 max_data->uid = current_uid();
1781 max_data->uid = task_uid(tsk);
1783 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1784 max_data->policy = tsk->policy;
1785 max_data->rt_priority = tsk->rt_priority;
1787 /* record this task's comm */
1788 tracing_record_cmdline(tsk);
1789 latency_fsnotify(tr);
1793 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1795 * @tsk: the task with the latency
1796 * @cpu: The cpu that initiated the trace.
1797 * @cond_data: User data associated with a conditional snapshot
1799 * Flip the buffers between the @tr and the max_tr and record information
1800 * about which task was the cause of this latency.
1803 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1809 WARN_ON_ONCE(!irqs_disabled());
1811 if (!tr->allocated_snapshot) {
1812 /* Only the nop tracer should hit this when disabling */
1813 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1817 arch_spin_lock(&tr->max_lock);
1819 /* Inherit the recordable setting from array_buffer */
1820 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1821 ring_buffer_record_on(tr->max_buffer.buffer);
1823 ring_buffer_record_off(tr->max_buffer.buffer);
1825 #ifdef CONFIG_TRACER_SNAPSHOT
1826 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1829 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1831 __update_max_tr(tr, tsk, cpu);
1834 arch_spin_unlock(&tr->max_lock);
1838 * update_max_tr_single - only copy one trace over, and reset the rest
1840 * @tsk: task with the latency
1841 * @cpu: the cpu of the buffer to copy.
1843 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1846 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1853 WARN_ON_ONCE(!irqs_disabled());
1854 if (!tr->allocated_snapshot) {
1855 /* Only the nop tracer should hit this when disabling */
1856 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1860 arch_spin_lock(&tr->max_lock);
1862 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1864 if (ret == -EBUSY) {
1866 * We failed to swap the buffer due to a commit taking
1867 * place on this CPU. We fail to record, but we reset
1868 * the max trace buffer (no one writes directly to it)
1869 * and flag that it failed.
1871 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1872 "Failed to swap buffers due to commit in progress\n");
1875 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1877 __update_max_tr(tr, tsk, cpu);
1878 arch_spin_unlock(&tr->max_lock);
1880 #endif /* CONFIG_TRACER_MAX_TRACE */
1882 static int wait_on_pipe(struct trace_iterator *iter, int full)
1884 /* Iterators are static, they should be filled or empty */
1885 if (trace_buffer_iter(iter, iter->cpu_file))
1888 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1892 #ifdef CONFIG_FTRACE_STARTUP_TEST
1893 static bool selftests_can_run;
1895 struct trace_selftests {
1896 struct list_head list;
1897 struct tracer *type;
1900 static LIST_HEAD(postponed_selftests);
1902 static int save_selftest(struct tracer *type)
1904 struct trace_selftests *selftest;
1906 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1910 selftest->type = type;
1911 list_add(&selftest->list, &postponed_selftests);
1915 static int run_tracer_selftest(struct tracer *type)
1917 struct trace_array *tr = &global_trace;
1918 struct tracer *saved_tracer = tr->current_trace;
1921 if (!type->selftest || tracing_selftest_disabled)
1925 * If a tracer registers early in boot up (before scheduling is
1926 * initialized and such), then do not run its selftest yet.
1927 * Instead, run it a little later in the boot process.
1929 if (!selftests_can_run)
1930 return save_selftest(type);
1932 if (!tracing_is_on()) {
1933 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1939 * Run a selftest on this tracer.
1940 * Here we reset the trace buffer, and set the current
1941 * tracer to be this tracer. The tracer can then run some
1942 * internal tracing to verify that everything is in order.
1943 * If we fail, we do not register this tracer.
1945 tracing_reset_online_cpus(&tr->array_buffer);
1947 tr->current_trace = type;
1949 #ifdef CONFIG_TRACER_MAX_TRACE
1950 if (type->use_max_tr) {
1951 /* If we expanded the buffers, make sure the max is expanded too */
1952 if (ring_buffer_expanded)
1953 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1954 RING_BUFFER_ALL_CPUS);
1955 tr->allocated_snapshot = true;
1959 /* the test is responsible for initializing and enabling */
1960 pr_info("Testing tracer %s: ", type->name);
1961 ret = type->selftest(type, tr);
1962 /* the test is responsible for resetting too */
1963 tr->current_trace = saved_tracer;
1965 printk(KERN_CONT "FAILED!\n");
1966 /* Add the warning after printing 'FAILED' */
1970 /* Only reset on passing, to avoid touching corrupted buffers */
1971 tracing_reset_online_cpus(&tr->array_buffer);
1973 #ifdef CONFIG_TRACER_MAX_TRACE
1974 if (type->use_max_tr) {
1975 tr->allocated_snapshot = false;
1977 /* Shrink the max buffer again */
1978 if (ring_buffer_expanded)
1979 ring_buffer_resize(tr->max_buffer.buffer, 1,
1980 RING_BUFFER_ALL_CPUS);
1984 printk(KERN_CONT "PASSED\n");
1988 static __init int init_trace_selftests(void)
1990 struct trace_selftests *p, *n;
1991 struct tracer *t, **last;
1994 selftests_can_run = true;
1996 mutex_lock(&trace_types_lock);
1998 if (list_empty(&postponed_selftests))
2001 pr_info("Running postponed tracer tests:\n");
2003 tracing_selftest_running = true;
2004 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2005 /* This loop can take minutes when sanitizers are enabled, so
2006 * let's make sure we allow RCU processing.
2009 ret = run_tracer_selftest(p->type);
2010 /* If the test fails, then warn and remove from available_tracers */
2012 WARN(1, "tracer: %s failed selftest, disabling\n",
2014 last = &trace_types;
2015 for (t = trace_types; t; t = t->next) {
2026 tracing_selftest_running = false;
2029 mutex_unlock(&trace_types_lock);
2033 core_initcall(init_trace_selftests);
2035 static inline int run_tracer_selftest(struct tracer *type)
2039 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2041 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2043 static void __init apply_trace_boot_options(void);
2046 * register_tracer - register a tracer with the ftrace system.
2047 * @type: the plugin for the tracer
2049 * Register a new plugin tracer.
2051 int __init register_tracer(struct tracer *type)
2057 pr_info("Tracer must have a name\n");
2061 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2062 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2066 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2067 pr_warn("Can not register tracer %s due to lockdown\n",
2072 mutex_lock(&trace_types_lock);
2074 tracing_selftest_running = true;
2076 for (t = trace_types; t; t = t->next) {
2077 if (strcmp(type->name, t->name) == 0) {
2079 pr_info("Tracer %s already registered\n",
2086 if (!type->set_flag)
2087 type->set_flag = &dummy_set_flag;
2089 /* allocate a dummy tracer_flags */
2090 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2095 type->flags->val = 0;
2096 type->flags->opts = dummy_tracer_opt;
2098 if (!type->flags->opts)
2099 type->flags->opts = dummy_tracer_opt;
2101 /* store the tracer for __set_tracer_option */
2102 type->flags->trace = type;
2104 ret = run_tracer_selftest(type);
2108 type->next = trace_types;
2110 add_tracer_options(&global_trace, type);
2113 tracing_selftest_running = false;
2114 mutex_unlock(&trace_types_lock);
2116 if (ret || !default_bootup_tracer)
2119 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2122 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2123 /* Do we want this tracer to start on bootup? */
2124 tracing_set_tracer(&global_trace, type->name);
2125 default_bootup_tracer = NULL;
2127 apply_trace_boot_options();
2129 /* disable other selftests, since this will break it. */
2130 disable_tracing_selftest("running a tracer");
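/*
 * Example (sketch): a minimal tracer plugin registers itself at boot time.
 * The callbacks shown are illustrative stubs.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */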
2136 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2138 struct trace_buffer *buffer = buf->buffer;
2143 ring_buffer_record_disable(buffer);
2145 /* Make sure all commits have finished */
2147 ring_buffer_reset_cpu(buffer, cpu);
2149 ring_buffer_record_enable(buffer);
2152 void tracing_reset_online_cpus(struct array_buffer *buf)
2154 struct trace_buffer *buffer = buf->buffer;
2159 ring_buffer_record_disable(buffer);
2161 /* Make sure all commits have finished */
2164 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2166 ring_buffer_reset_online_cpus(buffer);
2168 ring_buffer_record_enable(buffer);
2171 /* Must have trace_types_lock held */
2172 void tracing_reset_all_online_cpus(void)
2174 struct trace_array *tr;
2176 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2177 if (!tr->clear_trace)
2179 tr->clear_trace = false;
2180 tracing_reset_online_cpus(&tr->array_buffer);
2181 #ifdef CONFIG_TRACER_MAX_TRACE
2182 tracing_reset_online_cpus(&tr->max_buffer);
2187 static int *tgid_map;
2189 #define SAVED_CMDLINES_DEFAULT 128
2190 #define NO_CMDLINE_MAP UINT_MAX
2191 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2192 struct saved_cmdlines_buffer {
2193 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2194 unsigned *map_cmdline_to_pid;
2195 unsigned cmdline_num;
2197 char *saved_cmdlines;
2199 static struct saved_cmdlines_buffer *savedcmd;
2201 /* temporarily disable recording */
2202 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2204 static inline char *get_saved_cmdlines(int idx)
2206 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2209 static inline void set_cmdline(int idx, const char *cmdline)
2211 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2214 static int allocate_cmdlines_buffer(unsigned int val,
2215 struct saved_cmdlines_buffer *s)
2217 s->map_cmdline_to_pid = kmalloc_array(val,
2218 sizeof(*s->map_cmdline_to_pid),
2220 if (!s->map_cmdline_to_pid)
2223 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2224 if (!s->saved_cmdlines) {
2225 kfree(s->map_cmdline_to_pid);
2230 s->cmdline_num = val;
2231 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2232 sizeof(s->map_pid_to_cmdline));
2233 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2234 val * sizeof(*s->map_cmdline_to_pid));
2239 static int trace_create_savedcmd(void)
2243 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2247 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2257 int is_tracing_stopped(void)
2259 return global_trace.stop_count;
2263 * tracing_start - quick start of the tracer
2265 * If tracing is enabled but was stopped by tracing_stop,
2266 * this will start the tracer back up.
2268 void tracing_start(void)
2270 struct trace_buffer *buffer;
2271 unsigned long flags;
2273 if (tracing_disabled)
2276 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2277 if (--global_trace.stop_count) {
2278 if (global_trace.stop_count < 0) {
2279 /* Someone screwed up their debugging */
2281 global_trace.stop_count = 0;
2286 /* Prevent the buffers from switching */
2287 arch_spin_lock(&global_trace.max_lock);
2289 buffer = global_trace.array_buffer.buffer;
2291 ring_buffer_record_enable(buffer);
2293 #ifdef CONFIG_TRACER_MAX_TRACE
2294 buffer = global_trace.max_buffer.buffer;
2296 ring_buffer_record_enable(buffer);
2299 arch_spin_unlock(&global_trace.max_lock);
2302 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2305 static void tracing_start_tr(struct trace_array *tr)
2307 struct trace_buffer *buffer;
2308 unsigned long flags;
2310 if (tracing_disabled)
2313 /* If global, we need to also start the max tracer */
2314 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2315 return tracing_start();
2317 raw_spin_lock_irqsave(&tr->start_lock, flags);
2319 if (--tr->stop_count) {
2320 if (tr->stop_count < 0) {
2321 /* Someone screwed up their debugging */
2328 buffer = tr->array_buffer.buffer;
2330 ring_buffer_record_enable(buffer);
2333 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2337 * tracing_stop - quick stop of the tracer
2339 * Lightweight way to stop tracing. Use in conjunction with
2342 void tracing_stop(void)
2344 struct trace_buffer *buffer;
2345 unsigned long flags;
2347 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2348 if (global_trace.stop_count++)
2351 /* Prevent the buffers from switching */
2352 arch_spin_lock(&global_trace.max_lock);
2354 buffer = global_trace.array_buffer.buffer;
2356 ring_buffer_record_disable(buffer);
2358 #ifdef CONFIG_TRACER_MAX_TRACE
2359 buffer = global_trace.max_buffer.buffer;
2361 ring_buffer_record_disable(buffer);
2364 arch_spin_unlock(&global_trace.max_lock);
2367 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2370 static void tracing_stop_tr(struct trace_array *tr)
2372 struct trace_buffer *buffer;
2373 unsigned long flags;
2375 /* If global, we need to also stop the max tracer */
2376 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2377 return tracing_stop();
2379 raw_spin_lock_irqsave(&tr->start_lock, flags);
2380 if (tr->stop_count++)
2383 buffer = tr->array_buffer.buffer;
2385 ring_buffer_record_disable(buffer);
2388 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2391 static int trace_save_cmdline(struct task_struct *tsk)
2395 /* treat recording of idle task as a success */
2399 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2403 * It's not the end of the world if we don't get
2404 * the lock, but we also don't want to spin
2405 * nor do we want to disable interrupts,
2406 * so if we miss here, then better luck next time.
2408 if (!arch_spin_trylock(&trace_cmdline_lock))
2411 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2412 if (idx == NO_CMDLINE_MAP) {
2413 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2416 * Check whether the cmdline buffer at idx has a pid
2417 * mapped. We are going to overwrite that entry so we
2418 * need to clear the map_pid_to_cmdline. Otherwise we
2419 * would read the new comm for the old pid.
2421 pid = savedcmd->map_cmdline_to_pid[idx];
2422 if (pid != NO_CMDLINE_MAP)
2423 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2425 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2426 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2428 savedcmd->cmdline_idx = idx;
2431 set_cmdline(idx, tsk->comm);
2433 arch_spin_unlock(&trace_cmdline_lock);
2438 static void __trace_find_cmdline(int pid, char comm[])
2443 strcpy(comm, "<idle>");
2447 if (WARN_ON_ONCE(pid < 0)) {
2448 strcpy(comm, "<XXX>");
2452 if (pid > PID_MAX_DEFAULT) {
2453 strcpy(comm, "<...>");
2457 map = savedcmd->map_pid_to_cmdline[pid];
2458 if (map != NO_CMDLINE_MAP)
2459 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2461 strcpy(comm, "<...>");
2464 void trace_find_cmdline(int pid, char comm[])
2467 arch_spin_lock(&trace_cmdline_lock);
2469 __trace_find_cmdline(pid, comm);
2471 arch_spin_unlock(&trace_cmdline_lock);
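/*
 * Illustrative only (the 'entry' and 's' locals are hypothetical): output
 * code typically resolves a pid recorded in an event back to a comm with
 * trace_find_cmdline(), e.g.:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, entry->pid);
 */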
2475 int trace_find_tgid(int pid)
2477 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2480 return tgid_map[pid];
2483 static int trace_save_tgid(struct task_struct *tsk)
2485 /* treat recording of idle task as a success */
2489 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2492 tgid_map[tsk->pid] = tsk->tgid;
2496 static bool tracing_record_taskinfo_skip(int flags)
2498 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2500 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2502 if (!__this_cpu_read(trace_taskinfo_save))
2508 * tracing_record_taskinfo - record the task info of a task
2510 * @task: task to record
2511 * @flags: TRACE_RECORD_CMDLINE for recording comm
2512 * TRACE_RECORD_TGID for recording tgid
2514 void tracing_record_taskinfo(struct task_struct *task, int flags)
2518 if (tracing_record_taskinfo_skip(flags))
2522 * Record as much task information as possible. If some fail, continue
2523 * to try to record the others.
2525 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2526 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2528 /* If recording any information failed, retry again soon. */
2532 __this_cpu_write(trace_taskinfo_save, false);
2536 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2538 * @prev: previous task during sched_switch
2539 * @next: next task during sched_switch
2540 * @flags: TRACE_RECORD_CMDLINE for recording comm
2541 * TRACE_RECORD_TGID for recording tgid
2543 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2544 struct task_struct *next, int flags)
2548 if (tracing_record_taskinfo_skip(flags))
2552 * Record as much task information as possible. If some fail, continue
2553 * to try to record the others.
2555 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2556 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2557 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2558 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2560 /* If recording any information failed, retry again soon. */
2564 __this_cpu_write(trace_taskinfo_save, false);
2567 /* Helpers to record specific task information */
2568 void tracing_record_cmdline(struct task_struct *task)
2570 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2573 void tracing_record_tgid(struct task_struct *task)
2575 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2579 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2580 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2581 * simplifies those functions and keeps them in sync.
2583 enum print_line_t trace_handle_return(struct trace_seq *s)
2585 return trace_seq_has_overflowed(s) ?
2586 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2588 EXPORT_SYMBOL_GPL(trace_handle_return);
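/*
 * A minimal sketch of a hypothetical print callback using
 * trace_handle_return(): write everything into the trace_seq and let the
 * helper map an overflow to TRACE_TYPE_PARTIAL_LINE.
 *
 *	static enum print_line_t
 *	example_trace_print(struct trace_iterator *iter, int flags,
 *			    struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event on CPU %d\n",
 *				 iter->cpu);
 *		return trace_handle_return(&iter->seq);
 *	}
 */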
2590 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2592 unsigned int trace_flags = irqs_status;
2595 pc = preempt_count();
2598 trace_flags |= TRACE_FLAG_NMI;
2599 if (pc & HARDIRQ_MASK)
2600 trace_flags |= TRACE_FLAG_HARDIRQ;
2601 if (in_serving_softirq())
2602 trace_flags |= TRACE_FLAG_SOFTIRQ;
2604 if (tif_need_resched())
2605 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2606 if (test_preempt_need_resched())
2607 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2608 return (trace_flags << 16) | (pc & 0xff);
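/*
 * Layout note (derived from the return statement above): the low byte of
 * the packed trace_ctx holds the preempt count and the upper bits hold the
 * TRACE_FLAG_* bits, so a consumer can unpack it roughly as:
 *
 *	unsigned int pc    = trace_ctx & 0xff;
 *	unsigned int flags = trace_ctx >> 16;
 */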
2611 struct ring_buffer_event *
2612 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2615 unsigned int trace_ctx)
2617 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2620 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2621 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2622 static int trace_buffered_event_ref;
2625 * trace_buffered_event_enable - enable buffering events
2627 * When events are being filtered, it is quicker to use a temporary
2628 * buffer to write the event data into if there's a likely chance
2629 * that it will not be committed. The discard of the ring buffer
2630 * is not as fast as committing, and is much slower than copying
2633 * When an event is to be filtered, allocate per cpu buffers to
2634 * write the event data into, and if the event is filtered and discarded
2635 it is simply dropped; otherwise, the entire contents are to be committed
2638 void trace_buffered_event_enable(void)
2640 struct ring_buffer_event *event;
2644 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2646 if (trace_buffered_event_ref++)
2649 for_each_tracing_cpu(cpu) {
2650 page = alloc_pages_node(cpu_to_node(cpu),
2651 GFP_KERNEL | __GFP_NORETRY, 0);
2655 event = page_address(page);
2656 memset(event, 0, sizeof(*event));
2658 per_cpu(trace_buffered_event, cpu) = event;
2661 if (cpu == smp_processor_id() &&
2662 __this_cpu_read(trace_buffered_event) !=
2663 per_cpu(trace_buffered_event, cpu))
2670 trace_buffered_event_disable();
2673 static void enable_trace_buffered_event(void *data)
2675 /* Probably not needed, but do it anyway */
2677 this_cpu_dec(trace_buffered_event_cnt);
2680 static void disable_trace_buffered_event(void *data)
2682 this_cpu_inc(trace_buffered_event_cnt);
2686 * trace_buffered_event_disable - disable buffering events
2688 * When a filter is removed, it is faster to not use the buffered
2689 * events, and to commit directly into the ring buffer. Free up
2690 * the temp buffers when there are no more users. This requires
2691 * special synchronization with current events.
2693 void trace_buffered_event_disable(void)
2697 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2699 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2702 if (--trace_buffered_event_ref)
2706 /* For each CPU, set the buffer as used. */
2707 smp_call_function_many(tracing_buffer_mask,
2708 disable_trace_buffered_event, NULL, 1);
2711 /* Wait for all current users to finish */
2714 for_each_tracing_cpu(cpu) {
2715 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2716 per_cpu(trace_buffered_event, cpu) = NULL;
2719 * Make sure trace_buffered_event is NULL before clearing
2720 * trace_buffered_event_cnt.
2725 /* Do the work on each cpu */
2726 smp_call_function_many(tracing_buffer_mask,
2727 enable_trace_buffered_event, NULL, 1);
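/*
 * Illustrative pairing sketch (the real call sites live elsewhere): both
 * functions expect event_mutex to be held and are reference counted, so a
 * filter that is added and later removed would roughly do:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		(when the filter is installed)
 *	mutex_unlock(&event_mutex);
 *	...
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();		(when the filter is removed)
 *	mutex_unlock(&event_mutex);
 */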
2731 static struct trace_buffer *temp_buffer;
2733 struct ring_buffer_event *
2734 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2735 struct trace_event_file *trace_file,
2736 int type, unsigned long len,
2737 unsigned int trace_ctx)
2739 struct ring_buffer_event *entry;
2742 *current_rb = trace_file->tr->array_buffer.buffer;
2744 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2745 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2746 (entry = this_cpu_read(trace_buffered_event))) {
2747 /* Try to use the per cpu buffer first */
2748 val = this_cpu_inc_return(trace_buffered_event_cnt);
2749 if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
2750 trace_event_setup(entry, type, trace_ctx);
2751 entry->array[0] = len;
2754 this_cpu_dec(trace_buffered_event_cnt);
2757 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2760 * If tracing is off, but we have triggers enabled
2761 * we still need to look at the event data. Use the temp_buffer
2762 to store the trace event for the trigger to use. It's recursion
2763 safe and will not be recorded anywhere.
2765 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2766 *current_rb = temp_buffer;
2767 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2772 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2774 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2775 static DEFINE_MUTEX(tracepoint_printk_mutex);
2777 static void output_printk(struct trace_event_buffer *fbuffer)
2779 struct trace_event_call *event_call;
2780 struct trace_event_file *file;
2781 struct trace_event *event;
2782 unsigned long flags;
2783 struct trace_iterator *iter = tracepoint_print_iter;
2785 /* We should never get here with a NULL iter */
2786 if (WARN_ON_ONCE(!iter))
2789 event_call = fbuffer->trace_file->event_call;
2790 if (!event_call || !event_call->event.funcs ||
2791 !event_call->event.funcs->trace)
2794 file = fbuffer->trace_file;
2795 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2796 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2797 !filter_match_preds(file->filter, fbuffer->entry)))
2800 event = &fbuffer->trace_file->event_call->event;
2802 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2803 trace_seq_init(&iter->seq);
2804 iter->ent = fbuffer->entry;
2805 event_call->event.funcs->trace(iter, 0, event);
2806 trace_seq_putc(&iter->seq, 0);
2807 printk("%s", iter->seq.buffer);
2809 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2812 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2813 void *buffer, size_t *lenp,
2816 int save_tracepoint_printk;
2819 mutex_lock(&tracepoint_printk_mutex);
2820 save_tracepoint_printk = tracepoint_printk;
2822 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2825 * This will force exiting early, as tracepoint_printk
2826 * is always zero when tracepoint_print_iter is not allocated
2828 if (!tracepoint_print_iter)
2829 tracepoint_printk = 0;
2831 if (save_tracepoint_printk == tracepoint_printk)
2834 if (tracepoint_printk)
2835 static_key_enable(&tracepoint_printk_key.key);
2837 static_key_disable(&tracepoint_printk_key.key);
2840 mutex_unlock(&tracepoint_printk_mutex);
2845 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2847 if (static_key_false(&tracepoint_printk_key.key))
2848 output_printk(fbuffer);
2850 if (static_branch_unlikely(&trace_event_exports_enabled))
2851 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2852 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2853 fbuffer->event, fbuffer->entry,
2854 fbuffer->trace_ctx, fbuffer->regs);
2856 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2861 * trace_buffer_unlock_commit_regs()
2862 * trace_event_buffer_commit()
2863 * trace_event_raw_event_xxx()
2865 # define STACK_SKIP 3
2867 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2868 struct trace_buffer *buffer,
2869 struct ring_buffer_event *event,
2870 unsigned int trace_ctx,
2871 struct pt_regs *regs)
2873 __buffer_unlock_commit(buffer, event);
2876 * If regs is not set, then skip the necessary functions.
2877 * Note, we can still get here via blktrace, wakeup tracer
2878 * and mmiotrace, but that's ok if they lose a function or
2879 * two. They are not that meaningful.
2881 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2882 ftrace_trace_userstack(tr, buffer, trace_ctx);
2886 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2889 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2890 struct ring_buffer_event *event)
2892 __buffer_unlock_commit(buffer, event);
2896 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2897 parent_ip, unsigned int trace_ctx)
2899 struct trace_event_call *call = &event_function;
2900 struct trace_buffer *buffer = tr->array_buffer.buffer;
2901 struct ring_buffer_event *event;
2902 struct ftrace_entry *entry;
2904 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2908 entry = ring_buffer_event_data(event);
2910 entry->parent_ip = parent_ip;
2912 if (!call_filter_check_discard(call, entry, buffer, event)) {
2913 if (static_branch_unlikely(&trace_function_exports_enabled))
2914 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2915 __buffer_unlock_commit(buffer, event);
2919 #ifdef CONFIG_STACKTRACE
2921 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2922 #define FTRACE_KSTACK_NESTING 4
2924 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2926 struct ftrace_stack {
2927 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2931 struct ftrace_stacks {
2932 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2935 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2936 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
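/*
 * Worked example: if a softirq fires while the interrupted task is already
 * inside __ftrace_trace_stack(), the softirq's call sees
 * ftrace_stack_reserve == 1 and records into stacks[1], leaving stacks[0]
 * intact for the task it interrupted. With four slots, normal context,
 * softirq, hardirq and NMI can each capture a stack without clobbering one
 * another.
 */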
2938 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2939 unsigned int trace_ctx,
2940 int skip, struct pt_regs *regs)
2942 struct trace_event_call *call = &event_kernel_stack;
2943 struct ring_buffer_event *event;
2944 unsigned int size, nr_entries;
2945 struct ftrace_stack *fstack;
2946 struct stack_entry *entry;
2950 * Add one, for this function and the call to stack_trace_save().
2951 * If regs is set, then these functions will not be in the way.
2953 #ifndef CONFIG_UNWINDER_ORC
2958 preempt_disable_notrace();
2960 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2962 /* This should never happen. If it does, yell once and skip */
2963 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2967 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2968 * interrupt will either see the value pre increment or post
2969 * increment. If the interrupt happens pre increment it will have
2970 * restored the counter when it returns. We just need a barrier to
2971 * keep gcc from moving things around.
2975 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2976 size = ARRAY_SIZE(fstack->calls);
2979 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2982 nr_entries = stack_trace_save(fstack->calls, size, skip);
2985 size = nr_entries * sizeof(unsigned long);
2986 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2987 sizeof(*entry) + size, trace_ctx);
2990 entry = ring_buffer_event_data(event);
2992 memcpy(&entry->caller, fstack->calls, size);
2993 entry->size = nr_entries;
2995 if (!call_filter_check_discard(call, entry, buffer, event))
2996 __buffer_unlock_commit(buffer, event);
2999 /* Again, don't let gcc optimize things here */
3001 __this_cpu_dec(ftrace_stack_reserve);
3002 preempt_enable_notrace();
3006 static inline void ftrace_trace_stack(struct trace_array *tr,
3007 struct trace_buffer *buffer,
3008 unsigned int trace_ctx,
3009 int skip, struct pt_regs *regs)
3011 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3014 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3017 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3020 struct trace_buffer *buffer = tr->array_buffer.buffer;
3022 if (rcu_is_watching()) {
3023 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3028 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3029 * but if the above rcu_is_watching() failed, then the NMI
3030 * triggered someplace critical, and rcu_irq_enter() should
3031 * not be called from NMI.
3033 if (unlikely(in_nmi()))
3036 rcu_irq_enter_irqson();
3037 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3038 rcu_irq_exit_irqson();
3042 * trace_dump_stack - record a stack back trace in the trace buffer
3043 * @skip: Number of functions to skip (helper handlers)
3045 void trace_dump_stack(int skip)
3047 if (tracing_disabled || tracing_selftest_running)
3050 #ifndef CONFIG_UNWINDER_ORC
3051 /* Skip 1 to skip this function. */
3054 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3055 tracing_gen_ctx(), skip, NULL);
3057 EXPORT_SYMBOL_GPL(trace_dump_stack);
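/*
 * Example: a driver chasing an unexpected call path can drop a kernel
 * stack trace into the trace buffer instead of spamming the console:
 *
 *	trace_dump_stack(0);	(0 = do not skip any extra caller frames)
 */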
3059 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3060 static DEFINE_PER_CPU(int, user_stack_count);
3063 ftrace_trace_userstack(struct trace_array *tr,
3064 struct trace_buffer *buffer, unsigned int trace_ctx)
3066 struct trace_event_call *call = &event_user_stack;
3067 struct ring_buffer_event *event;
3068 struct userstack_entry *entry;
3070 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3074 * NMIs cannot handle page faults, even with fixups.
3075 * Saving the user stack can (and often does) fault.
3077 if (unlikely(in_nmi()))
3081 * prevent recursion, since the user stack tracing may
3082 * trigger other kernel events.
3085 if (__this_cpu_read(user_stack_count))
3088 __this_cpu_inc(user_stack_count);
3090 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3091 sizeof(*entry), trace_ctx);
3093 goto out_drop_count;
3094 entry = ring_buffer_event_data(event);
3096 entry->tgid = current->tgid;
3097 memset(&entry->caller, 0, sizeof(entry->caller));
3099 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3100 if (!call_filter_check_discard(call, entry, buffer, event))
3101 __buffer_unlock_commit(buffer, event);
3104 __this_cpu_dec(user_stack_count);
3108 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3109 static void ftrace_trace_userstack(struct trace_array *tr,
3110 struct trace_buffer *buffer,
3111 unsigned int trace_ctx)
3114 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3116 #endif /* CONFIG_STACKTRACE */
3118 /* created for use with alloc_percpu */
3119 struct trace_buffer_struct {
3121 char buffer[4][TRACE_BUF_SIZE];
3124 static struct trace_buffer_struct *trace_percpu_buffer;
3127 * This allows for lockless recording. If we're nested too deeply, then
3128 * this returns NULL.
3130 static char *get_trace_buf(void)
3132 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3134 if (!buffer || buffer->nesting >= 4)
3139 /* Interrupts must see nesting incremented before we use the buffer */
3141 return &buffer->buffer[buffer->nesting - 1][0];
3144 static void put_trace_buf(void)
3146 /* Don't let the decrement of nesting leak before this */
3148 this_cpu_dec(trace_percpu_buffer->nesting);
3151 static int alloc_percpu_trace_buffer(void)
3153 struct trace_buffer_struct *buffers;
3155 if (trace_percpu_buffer)
3158 buffers = alloc_percpu(struct trace_buffer_struct);
3159 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3162 trace_percpu_buffer = buffers;
3166 static int buffers_allocated;
3168 void trace_printk_init_buffers(void)
3170 if (buffers_allocated)
3173 if (alloc_percpu_trace_buffer())
3176 /* trace_printk() is for debug use only. Don't use it in production. */
3179 pr_warn("**********************************************************\n");
3180 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3182 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3184 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3185 pr_warn("** unsafe for production use. **\n");
3187 pr_warn("** If you see this message and you are not debugging **\n");
3188 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3190 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3191 pr_warn("**********************************************************\n");
3193 /* Expand the buffers to their configured size */
3194 tracing_update_buffers();
3196 buffers_allocated = 1;
3199 * trace_printk_init_buffers() can be called by modules.
3200 * If that happens, then we need to start cmdline recording
3201 * directly here. If the global_trace.buffer is already
3202 * allocated here, then this was called by module code.
3204 if (global_trace.array_buffer.buffer)
3205 tracing_start_cmdline_record();
3207 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3209 void trace_printk_start_comm(void)
3211 /* Start tracing comms if trace printk is set */
3212 if (!buffers_allocated)
3214 tracing_start_cmdline_record();
3217 static void trace_printk_start_stop_comm(int enabled)
3219 if (!buffers_allocated)
3223 tracing_start_cmdline_record();
3225 tracing_stop_cmdline_record();
3229 * trace_vbprintk - write binary msg to tracing buffer
3230 * @ip: The address of the caller
3231 * @fmt: The string format to write to the buffer
3232 * @args: Arguments for @fmt
3234 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3236 struct trace_event_call *call = &event_bprint;
3237 struct ring_buffer_event *event;
3238 struct trace_buffer *buffer;
3239 struct trace_array *tr = &global_trace;
3240 struct bprint_entry *entry;
3241 unsigned int trace_ctx;
3245 if (unlikely(tracing_selftest_running || tracing_disabled))
3248 /* Don't pollute graph traces with trace_vprintk internals */
3249 pause_graph_tracing();
3251 trace_ctx = tracing_gen_ctx();
3252 preempt_disable_notrace();
3254 tbuffer = get_trace_buf();
3260 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3262 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3265 size = sizeof(*entry) + sizeof(u32) * len;
3266 buffer = tr->array_buffer.buffer;
3267 ring_buffer_nest_start(buffer);
3268 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3272 entry = ring_buffer_event_data(event);
3276 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3277 if (!call_filter_check_discard(call, entry, buffer, event)) {
3278 __buffer_unlock_commit(buffer, event);
3279 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3283 ring_buffer_nest_end(buffer);
3288 preempt_enable_notrace();
3289 unpause_graph_tracing();
3293 EXPORT_SYMBOL_GPL(trace_vbprintk);
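/*
 * A minimal sketch of a hypothetical varargs wrapper feeding
 * trace_vbprintk(); the in-tree trace_printk() path reaches this function
 * through a similar wrapper defined elsewhere:
 *
 *	static int example_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */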
3297 __trace_array_vprintk(struct trace_buffer *buffer,
3298 unsigned long ip, const char *fmt, va_list args)
3300 struct trace_event_call *call = &event_print;
3301 struct ring_buffer_event *event;
3303 struct print_entry *entry;
3304 unsigned int trace_ctx;
3307 if (tracing_disabled || tracing_selftest_running)
3310 /* Don't pollute graph traces with trace_vprintk internals */
3311 pause_graph_tracing();
3313 trace_ctx = tracing_gen_ctx();
3314 preempt_disable_notrace();
3317 tbuffer = get_trace_buf();
3323 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3325 size = sizeof(*entry) + len + 1;
3326 ring_buffer_nest_start(buffer);
3327 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3331 entry = ring_buffer_event_data(event);
3334 memcpy(&entry->buf, tbuffer, len + 1);
3335 if (!call_filter_check_discard(call, entry, buffer, event)) {
3336 __buffer_unlock_commit(buffer, event);
3337 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3341 ring_buffer_nest_end(buffer);
3345 preempt_enable_notrace();
3346 unpause_graph_tracing();
3352 int trace_array_vprintk(struct trace_array *tr,
3353 unsigned long ip, const char *fmt, va_list args)
3355 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3359 * trace_array_printk - Print a message to a specific instance
3360 * @tr: The instance trace_array descriptor
3361 * @ip: The instruction pointer that this is called from.
3362 * @fmt: The format to print (printf format)
3364 * If a subsystem sets up its own instance, it has the right to
3365 * printk strings into its tracing instance buffer using this
3366 * function. Note, this function will not write into the top level
3367 * buffer (use trace_printk() for that), as the top level buffer
3368 * should only contain events that can be individually disabled.
3369 * trace_printk() is only for debugging a kernel, and should never
3370 * be incorporated in normal use.
3372 * trace_array_printk() can be used, as it will not add noise to the
3373 * top level tracing buffer.
3375 * Note, trace_array_init_printk() must be called on @tr before this
3379 int trace_array_printk(struct trace_array *tr,
3380 unsigned long ip, const char *fmt, ...)
3388 /* This is only allowed for created instances */
3389 if (tr == &global_trace)
3392 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3396 ret = trace_array_vprintk(tr, ip, fmt, ap);
3400 EXPORT_SYMBOL_GPL(trace_array_printk);
3403 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3404 * @tr: The trace array to initialize the buffers for
3406 * As trace_array_printk() only writes into instances, calls to it are OK
3407 * to have in the kernel (unlike trace_printk()). This needs to be called
3408 * before trace_array_printk() can be used on a trace_array.
3410 int trace_array_init_printk(struct trace_array *tr)
3415 /* This is only allowed for created instances */
3416 if (tr == &global_trace)
3419 return alloc_percpu_trace_buffer();
3421 EXPORT_SYMBOL_GPL(trace_array_init_printk);
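/*
 * Illustrative usage sketch (the instance name and the 'nr' variable are
 * hypothetical; trace_array_get_by_name() and _THIS_IP_ are defined
 * elsewhere in the kernel): a subsystem that owns a trace instance
 * initializes the printk buffers once and then logs only into that
 * instance:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "queued %d requests\n", nr);
 */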
3424 int trace_array_printk_buf(struct trace_buffer *buffer,
3425 unsigned long ip, const char *fmt, ...)
3430 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3434 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3440 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3442 return trace_array_vprintk(&global_trace, ip, fmt, args);
3444 EXPORT_SYMBOL_GPL(trace_vprintk);
3446 static void trace_iterator_increment(struct trace_iterator *iter)
3448 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3452 ring_buffer_iter_advance(buf_iter);
3455 static struct trace_entry *
3456 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3457 unsigned long *lost_events)
3459 struct ring_buffer_event *event;
3460 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3463 event = ring_buffer_iter_peek(buf_iter, ts);
3465 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3466 (unsigned long)-1 : 0;
3468 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3473 iter->ent_size = ring_buffer_event_length(event);
3474 return ring_buffer_event_data(event);
3480 static struct trace_entry *
3481 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3482 unsigned long *missing_events, u64 *ent_ts)
3484 struct trace_buffer *buffer = iter->array_buffer->buffer;
3485 struct trace_entry *ent, *next = NULL;
3486 unsigned long lost_events = 0, next_lost = 0;
3487 int cpu_file = iter->cpu_file;
3488 u64 next_ts = 0, ts;
3494 * If we are in a per_cpu trace file, don't bother iterating over
3495 * all CPUs; peek directly at the one CPU.
3497 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3498 if (ring_buffer_empty_cpu(buffer, cpu_file))
3500 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3502 *ent_cpu = cpu_file;
3507 for_each_tracing_cpu(cpu) {
3509 if (ring_buffer_empty_cpu(buffer, cpu))
3512 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3515 * Pick the entry with the smallest timestamp:
3517 if (ent && (!next || ts < next_ts)) {
3521 next_lost = lost_events;
3522 next_size = iter->ent_size;
3526 iter->ent_size = next_size;
3529 *ent_cpu = next_cpu;
3535 *missing_events = next_lost;
3540 #define STATIC_FMT_BUF_SIZE 128
3541 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3543 static char *trace_iter_expand_format(struct trace_iterator *iter)
3547 if (iter->fmt == static_fmt_buf)
3550 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3553 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3560 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3562 const char *p, *new_fmt;
3565 if (WARN_ON_ONCE(!fmt))
3568 if (iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3572 new_fmt = q = iter->fmt;
3574 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3575 if (!trace_iter_expand_format(iter))
3578 q += iter->fmt - new_fmt;
3579 new_fmt = iter->fmt;
3584 /* Replace %p with %px */
3588 } else if (p[0] == 'p' && !isalnum(p[1])) {
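/*
 * Example of the rewrite above: with TRACE_ITER_HASH_PTR cleared, an event
 * format such as
 *
 *	"comm=%s ptr=%p"
 *
 * is expanded into iter->fmt as
 *
 *	"comm=%s ptr=%px"
 *
 * so the raw pointer value is printed instead of its hashed form.
 */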
3599 #define STATIC_TEMP_BUF_SIZE 128
3600 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3602 /* Find the next real entry, without updating the iterator itself */
3603 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3604 int *ent_cpu, u64 *ent_ts)
3606 /* __find_next_entry will reset ent_size */
3607 int ent_size = iter->ent_size;
3608 struct trace_entry *entry;
3611 * If called from ftrace_dump(), then the iter->temp buffer
3612 * will be the static_temp_buf and not created from kmalloc.
3613 * If the entry size is greater than the buffer, we can
3614 * not save it. Just return NULL in that case. This is only
3615 * used to add markers when two consecutive events' time
3616 * stamps have a large delta. See trace_print_lat_context()
3618 if (iter->temp == static_temp_buf &&
3619 STATIC_TEMP_BUF_SIZE < ent_size)
3623 * The __find_next_entry() may call peek_next_entry(), which may
3624 * call ring_buffer_peek() that may make the contents of iter->ent
3625 * undefined. Need to copy iter->ent now.
3627 if (iter->ent && iter->ent != iter->temp) {
3628 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3629 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3631 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3636 iter->temp_size = iter->ent_size;
3638 memcpy(iter->temp, iter->ent, iter->ent_size);
3639 iter->ent = iter->temp;
3641 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3642 /* Put back the original ent_size */
3643 iter->ent_size = ent_size;
3648 /* Find the next real entry, and increment the iterator to the next entry */
3649 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3651 iter->ent = __find_next_entry(iter, &iter->cpu,
3652 &iter->lost_events, &iter->ts);
3655 trace_iterator_increment(iter);
3657 return iter->ent ? iter : NULL;
3660 static void trace_consume(struct trace_iterator *iter)
3662 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3663 &iter->lost_events);
3666 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3668 struct trace_iterator *iter = m->private;
3672 WARN_ON_ONCE(iter->leftover);
3676 /* can't go backwards */
3681 ent = trace_find_next_entry_inc(iter);
3685 while (ent && iter->idx < i)
3686 ent = trace_find_next_entry_inc(iter);
3693 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3695 struct ring_buffer_iter *buf_iter;
3696 unsigned long entries = 0;
3699 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3701 buf_iter = trace_buffer_iter(iter, cpu);
3705 ring_buffer_iter_reset(buf_iter);
3708 * With the max latency tracers it is possible that a reset
3709 * never took place on a cpu. This is evident when the
3710 * timestamp is before the start of the buffer.
3712 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3713 if (ts >= iter->array_buffer->time_start)
3716 ring_buffer_iter_advance(buf_iter);
3719 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3723 * The current tracer is copied to avoid global locking
3726 static void *s_start(struct seq_file *m, loff_t *pos)
3728 struct trace_iterator *iter = m->private;
3729 struct trace_array *tr = iter->tr;
3730 int cpu_file = iter->cpu_file;
3736 * copy the tracer to avoid using a global lock all around.
3737 * iter->trace is a copy of current_trace, the pointer to the
3738 * name may be used instead of a strcmp(), as iter->trace->name
3739 * will point to the same string as current_trace->name.
3741 mutex_lock(&trace_types_lock);
3742 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3743 *iter->trace = *tr->current_trace;
3744 mutex_unlock(&trace_types_lock);
3746 #ifdef CONFIG_TRACER_MAX_TRACE
3747 if (iter->snapshot && iter->trace->use_max_tr)
3748 return ERR_PTR(-EBUSY);
3751 if (!iter->snapshot)
3752 atomic_inc(&trace_record_taskinfo_disabled);
3754 if (*pos != iter->pos) {
3759 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3760 for_each_tracing_cpu(cpu)
3761 tracing_iter_reset(iter, cpu);
3763 tracing_iter_reset(iter, cpu_file);
3766 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3771 * If we overflowed the seq_file before, then we want
3772 * to just reuse the trace_seq buffer again.
3778 p = s_next(m, p, &l);
3782 trace_event_read_lock();
3783 trace_access_lock(cpu_file);
3787 static void s_stop(struct seq_file *m, void *p)
3789 struct trace_iterator *iter = m->private;
3791 #ifdef CONFIG_TRACER_MAX_TRACE
3792 if (iter->snapshot && iter->trace->use_max_tr)
3796 if (!iter->snapshot)
3797 atomic_dec(&trace_record_taskinfo_disabled);
3799 trace_access_unlock(iter->cpu_file);
3800 trace_event_read_unlock();
3804 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3805 unsigned long *entries, int cpu)
3807 unsigned long count;
3809 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3811 * If this buffer has skipped entries, then we hold all
3812 * entries for the trace and we need to ignore the
3813 * ones before the time stamp.
3815 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3816 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3817 /* total is the same as the entries */
3821 ring_buffer_overrun_cpu(buf->buffer, cpu);
3826 get_total_entries(struct array_buffer *buf,
3827 unsigned long *total, unsigned long *entries)
3835 for_each_tracing_cpu(cpu) {
3836 get_total_entries_cpu(buf, &t, &e, cpu);
3842 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3844 unsigned long total, entries;
3849 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3854 unsigned long trace_total_entries(struct trace_array *tr)
3856 unsigned long total, entries;
3861 get_total_entries(&tr->array_buffer, &total, &entries);
3866 static void print_lat_help_header(struct seq_file *m)
3868 seq_puts(m, "# _------=> CPU# \n"
3869 "# / _-----=> irqs-off \n"
3870 "# | / _----=> need-resched \n"
3871 "# || / _---=> hardirq/softirq \n"
3872 "# ||| / _--=> preempt-depth \n"
3874 "# cmd pid ||||| time | caller \n"
3875 "# \\ / ||||| \\ | / \n");
3878 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3880 unsigned long total;
3881 unsigned long entries;
3883 get_total_entries(buf, &total, &entries);
3884 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3885 entries, total, num_online_cpus());
3889 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3892 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3894 print_event_info(buf, m);
3896 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3897 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3900 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3903 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3904 const char *space = " ";
3905 int prec = tgid ? 12 : 2;
3907 print_event_info(buf, m);
3909 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3910 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3911 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3912 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3913 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3914 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3915 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3919 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3921 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3922 struct array_buffer *buf = iter->array_buffer;
3923 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3924 struct tracer *type = iter->trace;
3925 unsigned long entries;
3926 unsigned long total;
3927 const char *name = "preemption";
3931 get_total_entries(buf, &total, &entries);
3933 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3935 seq_puts(m, "# -----------------------------------"
3936 "---------------------------------\n");
3937 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3938 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3939 nsecs_to_usecs(data->saved_latency),
3943 #if defined(CONFIG_PREEMPT_NONE)
3945 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3947 #elif defined(CONFIG_PREEMPT)
3949 #elif defined(CONFIG_PREEMPT_RT)
3954 /* These are reserved for later use */
3957 seq_printf(m, " #P:%d)\n", num_online_cpus());
3961 seq_puts(m, "# -----------------\n");
3962 seq_printf(m, "# | task: %.16s-%d "
3963 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3964 data->comm, data->pid,
3965 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3966 data->policy, data->rt_priority);
3967 seq_puts(m, "# -----------------\n");
3969 if (data->critical_start) {
3970 seq_puts(m, "# => started at: ");
3971 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3972 trace_print_seq(m, &iter->seq);
3973 seq_puts(m, "\n# => ended at: ");
3974 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3975 trace_print_seq(m, &iter->seq);
3976 seq_puts(m, "\n#\n");
3982 static void test_cpu_buff_start(struct trace_iterator *iter)
3984 struct trace_seq *s = &iter->seq;
3985 struct trace_array *tr = iter->tr;
3987 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3990 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3993 if (cpumask_available(iter->started) &&
3994 cpumask_test_cpu(iter->cpu, iter->started))
3997 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4000 if (cpumask_available(iter->started))
4001 cpumask_set_cpu(iter->cpu, iter->started);
4003 /* Don't print the "buffer started" banner for the first entry of the trace */
4005 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4009 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4011 struct trace_array *tr = iter->tr;
4012 struct trace_seq *s = &iter->seq;
4013 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4014 struct trace_entry *entry;
4015 struct trace_event *event;
4019 test_cpu_buff_start(iter);
4021 event = ftrace_find_event(entry->type);
4023 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4024 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4025 trace_print_lat_context(iter);
4027 trace_print_context(iter);
4030 if (trace_seq_has_overflowed(s))
4031 return TRACE_TYPE_PARTIAL_LINE;
4034 return event->funcs->trace(iter, sym_flags, event);
4036 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4038 return trace_handle_return(s);
4041 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4043 struct trace_array *tr = iter->tr;
4044 struct trace_seq *s = &iter->seq;
4045 struct trace_entry *entry;
4046 struct trace_event *event;
4050 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4051 trace_seq_printf(s, "%d %d %llu ",
4052 entry->pid, iter->cpu, iter->ts);
4054 if (trace_seq_has_overflowed(s))
4055 return TRACE_TYPE_PARTIAL_LINE;
4057 event = ftrace_find_event(entry->type);
4059 return event->funcs->raw(iter, 0, event);
4061 trace_seq_printf(s, "%d ?\n", entry->type);
4063 return trace_handle_return(s);
4066 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4068 struct trace_array *tr = iter->tr;
4069 struct trace_seq *s = &iter->seq;
4070 unsigned char newline = '\n';
4071 struct trace_entry *entry;
4072 struct trace_event *event;
4076 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4077 SEQ_PUT_HEX_FIELD(s, entry->pid);
4078 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4079 SEQ_PUT_HEX_FIELD(s, iter->ts);
4080 if (trace_seq_has_overflowed(s))
4081 return TRACE_TYPE_PARTIAL_LINE;
4084 event = ftrace_find_event(entry->type);
4086 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4087 if (ret != TRACE_TYPE_HANDLED)
4091 SEQ_PUT_FIELD(s, newline);
4093 return trace_handle_return(s);
4096 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4098 struct trace_array *tr = iter->tr;
4099 struct trace_seq *s = &iter->seq;
4100 struct trace_entry *entry;
4101 struct trace_event *event;
4105 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4106 SEQ_PUT_FIELD(s, entry->pid);
4107 SEQ_PUT_FIELD(s, iter->cpu);
4108 SEQ_PUT_FIELD(s, iter->ts);
4109 if (trace_seq_has_overflowed(s))
4110 return TRACE_TYPE_PARTIAL_LINE;
4113 event = ftrace_find_event(entry->type);
4114 return event ? event->funcs->binary(iter, 0, event) :
4118 int trace_empty(struct trace_iterator *iter)
4120 struct ring_buffer_iter *buf_iter;
4123 /* If we are looking at one CPU buffer, only check that one */
4124 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4125 cpu = iter->cpu_file;
4126 buf_iter = trace_buffer_iter(iter, cpu);
4128 if (!ring_buffer_iter_empty(buf_iter))
4131 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4137 for_each_tracing_cpu(cpu) {
4138 buf_iter = trace_buffer_iter(iter, cpu);
4140 if (!ring_buffer_iter_empty(buf_iter))
4143 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4151 /* Called with trace_event_read_lock() held. */
4152 enum print_line_t print_trace_line(struct trace_iterator *iter)
4154 struct trace_array *tr = iter->tr;
4155 unsigned long trace_flags = tr->trace_flags;
4156 enum print_line_t ret;
4158 if (iter->lost_events) {
4159 if (iter->lost_events == (unsigned long)-1)
4160 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4163 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4164 iter->cpu, iter->lost_events);
4165 if (trace_seq_has_overflowed(&iter->seq))
4166 return TRACE_TYPE_PARTIAL_LINE;
4169 if (iter->trace && iter->trace->print_line) {
4170 ret = iter->trace->print_line(iter);
4171 if (ret != TRACE_TYPE_UNHANDLED)
4175 if (iter->ent->type == TRACE_BPUTS &&
4176 trace_flags & TRACE_ITER_PRINTK &&
4177 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4178 return trace_print_bputs_msg_only(iter);
4180 if (iter->ent->type == TRACE_BPRINT &&
4181 trace_flags & TRACE_ITER_PRINTK &&
4182 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4183 return trace_print_bprintk_msg_only(iter);
4185 if (iter->ent->type == TRACE_PRINT &&
4186 trace_flags & TRACE_ITER_PRINTK &&
4187 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4188 return trace_print_printk_msg_only(iter);
4190 if (trace_flags & TRACE_ITER_BIN)
4191 return print_bin_fmt(iter);
4193 if (trace_flags & TRACE_ITER_HEX)
4194 return print_hex_fmt(iter);
4196 if (trace_flags & TRACE_ITER_RAW)
4197 return print_raw_fmt(iter);
4199 return print_trace_fmt(iter);
4202 void trace_latency_header(struct seq_file *m)
4204 struct trace_iterator *iter = m->private;
4205 struct trace_array *tr = iter->tr;
4207 /* print nothing if the buffers are empty */
4208 if (trace_empty(iter))
4211 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4212 print_trace_header(m, iter);
4214 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4215 print_lat_help_header(m);
4218 void trace_default_header(struct seq_file *m)
4220 struct trace_iterator *iter = m->private;
4221 struct trace_array *tr = iter->tr;
4222 unsigned long trace_flags = tr->trace_flags;
4224 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4227 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4228 /* print nothing if the buffers are empty */
4229 if (trace_empty(iter))
4231 print_trace_header(m, iter);
4232 if (!(trace_flags & TRACE_ITER_VERBOSE))
4233 print_lat_help_header(m);
4235 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4236 if (trace_flags & TRACE_ITER_IRQ_INFO)
4237 print_func_help_header_irq(iter->array_buffer,
4240 print_func_help_header(iter->array_buffer, m,
4246 static void test_ftrace_alive(struct seq_file *m)
4248 if (!ftrace_is_dead())
4250 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4251 "# MAY BE MISSING FUNCTION EVENTS\n");
4254 #ifdef CONFIG_TRACER_MAX_TRACE
4255 static void show_snapshot_main_help(struct seq_file *m)
4257 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4258 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4259 "# Takes a snapshot of the main buffer.\n"
4260 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4261 "# (Doesn't have to be '2' works with any number that\n"
4262 "# is not a '0' or '1')\n");
4265 static void show_snapshot_percpu_help(struct seq_file *m)
4267 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4268 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4269 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4270 "# Takes a snapshot of the main buffer for this cpu.\n");
4272 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4273 "# Must use main snapshot file to allocate.\n");
4275 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4276 "# (Doesn't have to be '2' works with any number that\n"
4277 "# is not a '0' or '1')\n");
4280 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4282 if (iter->tr->allocated_snapshot)
4283 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4285 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4287 seq_puts(m, "# Snapshot commands:\n");
4288 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4289 show_snapshot_main_help(m);
4291 show_snapshot_percpu_help(m);
4294 /* Should never be called */
4295 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4298 static int s_show(struct seq_file *m, void *v)
4300 struct trace_iterator *iter = v;
4303 if (iter->ent == NULL) {
4305 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4307 test_ftrace_alive(m);
4309 if (iter->snapshot && trace_empty(iter))
4310 print_snapshot_help(m, iter);
4311 else if (iter->trace && iter->trace->print_header)
4312 iter->trace->print_header(m);
4314 trace_default_header(m);
4316 } else if (iter->leftover) {
4318 * If we filled the seq_file buffer earlier, we
4319 * want to just show it now.
4321 ret = trace_print_seq(m, &iter->seq);
4323 /* ret should this time be zero, but you never know */
4324 iter->leftover = ret;
4327 print_trace_line(iter);
4328 ret = trace_print_seq(m, &iter->seq);
4330 * If we overflow the seq_file buffer, then it will
4331 * ask us for this data again at start up.
4333 * ret is 0 if seq_file write succeeded.
4336 iter->leftover = ret;
4343 * Should be used after trace_array_get(), trace_types_lock
4344 * ensures that i_cdev was already initialized.
4346 static inline int tracing_get_cpu(struct inode *inode)
4348 if (inode->i_cdev) /* See trace_create_cpu_file() */
4349 return (long)inode->i_cdev - 1;
4350 return RING_BUFFER_ALL_CPUS;
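/*
 * Decoding note (derived from the function above): per-CPU files are
 * expected to stash (cpu + 1) in i_cdev when they are created, so a NULL
 * i_cdev means "all CPUs" and anything else decodes as:
 *
 *	cpu = (long)inode->i_cdev - 1;
 */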
4353 static const struct seq_operations tracer_seq_ops = {
4360 static struct trace_iterator *
4361 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4363 struct trace_array *tr = inode->i_private;
4364 struct trace_iterator *iter;
4367 if (tracing_disabled)
4368 return ERR_PTR(-ENODEV);
4370 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4372 return ERR_PTR(-ENOMEM);
4374 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4376 if (!iter->buffer_iter)
4380 * trace_find_next_entry() may need to save off iter->ent.
4381 * It will place it into the iter->temp buffer. As most
4382 * events are less than 128 bytes, allocate a buffer of that size.
4383 * If one is greater, then trace_find_next_entry() will
4384 * allocate a new buffer to adjust for the bigger iter->ent.
4385 * It's not critical if it fails to get allocated here.
4387 iter->temp = kmalloc(128, GFP_KERNEL);
4389 iter->temp_size = 128;
4392 * trace_event_printf() may need to modify the given format
4393 * string to replace %p with %px so that it shows the real address
4394 * instead of a hashed value. However, that is only needed for event
4395 * tracing; other tracers may not need it. Defer the allocation
4396 * until it is needed.
4402 * We make a copy of the current tracer to avoid concurrent
4403 * changes on it while we are reading.
4405 mutex_lock(&trace_types_lock);
4406 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4410 *iter->trace = *tr->current_trace;
4412 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4417 #ifdef CONFIG_TRACER_MAX_TRACE
4418 /* Currently only the top directory has a snapshot */
4419 if (tr->current_trace->print_max || snapshot)
4420 iter->array_buffer = &tr->max_buffer;
4423 iter->array_buffer = &tr->array_buffer;
4424 iter->snapshot = snapshot;
4426 iter->cpu_file = tracing_get_cpu(inode);
4427 mutex_init(&iter->mutex);
4429 /* Notify the tracer early; before we stop tracing. */
4430 if (iter->trace->open)
4431 iter->trace->open(iter);
4433 /* Annotate start of buffers if we had overruns */
4434 if (ring_buffer_overruns(iter->array_buffer->buffer))
4435 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4437 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4438 if (trace_clocks[tr->clock_id].in_ns)
4439 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4442 * If pause-on-trace is enabled, then stop the trace while
4443 * dumping, unless this is the "snapshot" file
4445 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4446 tracing_stop_tr(tr);
4448 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4449 for_each_tracing_cpu(cpu) {
4450 iter->buffer_iter[cpu] =
4451 ring_buffer_read_prepare(iter->array_buffer->buffer,
4454 ring_buffer_read_prepare_sync();
4455 for_each_tracing_cpu(cpu) {
4456 ring_buffer_read_start(iter->buffer_iter[cpu]);
4457 tracing_iter_reset(iter, cpu);
4460 cpu = iter->cpu_file;
4461 iter->buffer_iter[cpu] =
4462 ring_buffer_read_prepare(iter->array_buffer->buffer,
4464 ring_buffer_read_prepare_sync();
4465 ring_buffer_read_start(iter->buffer_iter[cpu]);
4466 tracing_iter_reset(iter, cpu);
4469 mutex_unlock(&trace_types_lock);
4474 mutex_unlock(&trace_types_lock);
4477 kfree(iter->buffer_iter);
4479 seq_release_private(inode, file);
4480 return ERR_PTR(-ENOMEM);
4483 int tracing_open_generic(struct inode *inode, struct file *filp)
4487 ret = tracing_check_open_get_tr(NULL);
4491 filp->private_data = inode->i_private;
4495 bool tracing_is_disabled(void)
4497 return (tracing_disabled) ? true : false;
4501 * Open and update trace_array ref count.
4502 * Must have the current trace_array passed to it.
4504 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4506 struct trace_array *tr = inode->i_private;
4509 ret = tracing_check_open_get_tr(tr);
4513 filp->private_data = inode->i_private;
4518 static int tracing_release(struct inode *inode, struct file *file)
4520 struct trace_array *tr = inode->i_private;
4521 struct seq_file *m = file->private_data;
4522 struct trace_iterator *iter;
4525 if (!(file->f_mode & FMODE_READ)) {
4526 trace_array_put(tr);
4530 /* Writes do not use seq_file */
4532 mutex_lock(&trace_types_lock);
4534 for_each_tracing_cpu(cpu) {
4535 if (iter->buffer_iter[cpu])
4536 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4539 if (iter->trace && iter->trace->close)
4540 iter->trace->close(iter);
4542 if (!iter->snapshot && tr->stop_count)
4543 /* reenable tracing if it was previously enabled */
4544 tracing_start_tr(tr);
4546 __trace_array_put(tr);
4548 mutex_unlock(&trace_types_lock);
4550 mutex_destroy(&iter->mutex);
4551 free_cpumask_var(iter->started);
4555 kfree(iter->buffer_iter);
4556 seq_release_private(inode, file);
4561 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4563 struct trace_array *tr = inode->i_private;
4565 trace_array_put(tr);
4569 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4571 struct trace_array *tr = inode->i_private;
4573 trace_array_put(tr);
4575 return single_release(inode, file);
4578 static int tracing_open(struct inode *inode, struct file *file)
4580 struct trace_array *tr = inode->i_private;
4581 struct trace_iterator *iter;
4584 ret = tracing_check_open_get_tr(tr);
4588 /* If this file was open for write, then erase contents */
4589 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4590 int cpu = tracing_get_cpu(inode);
4591 struct array_buffer *trace_buf = &tr->array_buffer;
4593 #ifdef CONFIG_TRACER_MAX_TRACE
4594 if (tr->current_trace->print_max)
4595 trace_buf = &tr->max_buffer;
4598 if (cpu == RING_BUFFER_ALL_CPUS)
4599 tracing_reset_online_cpus(trace_buf);
4601 tracing_reset_cpu(trace_buf, cpu);
4604 if (file->f_mode & FMODE_READ) {
4605 iter = __tracing_open(inode, file, false);
4607 ret = PTR_ERR(iter);
4608 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4609 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4613 trace_array_put(tr);
4619 * Some tracers are not suitable for instance buffers.
4620 * A tracer is always available for the global array (toplevel)
4621 * or if it explicitly states that it is.
4624 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4626 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4629 /* Find the next tracer that this trace array may use */
4630 static struct tracer *
4631 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4633 while (t && !trace_ok_for_array(t, tr))
4640 t_next(struct seq_file *m, void *v, loff_t *pos)
4642 struct trace_array *tr = m->private;
4643 struct tracer *t = v;
4648 t = get_tracer_for_array(tr, t->next);
4653 static void *t_start(struct seq_file *m, loff_t *pos)
4655 struct trace_array *tr = m->private;
4659 mutex_lock(&trace_types_lock);
4661 t = get_tracer_for_array(tr, trace_types);
4662 for (; t && l < *pos; t = t_next(m, t, &l))
4668 static void t_stop(struct seq_file *m, void *p)
4670 mutex_unlock(&trace_types_lock);
4673 static int t_show(struct seq_file *m, void *v)
4675 struct tracer *t = v;
4680 seq_puts(m, t->name);
4689 static const struct seq_operations show_traces_seq_ops = {
4696 static int show_traces_open(struct inode *inode, struct file *file)
4698 struct trace_array *tr = inode->i_private;
4702 ret = tracing_check_open_get_tr(tr);
4706 ret = seq_open(file, &show_traces_seq_ops);
4708 trace_array_put(tr);
4712 m = file->private_data;
4718 static int show_traces_release(struct inode *inode, struct file *file)
4720 struct trace_array *tr = inode->i_private;
4722 trace_array_put(tr);
4723 return seq_release(inode, file);
4727 tracing_write_stub(struct file *filp, const char __user *ubuf,
4728 size_t count, loff_t *ppos)
4733 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4737 if (file->f_mode & FMODE_READ)
4738 ret = seq_lseek(file, offset, whence);
4740 file->f_pos = ret = 0;
4745 static const struct file_operations tracing_fops = {
4746 .open = tracing_open,
4748 .write = tracing_write_stub,
4749 .llseek = tracing_lseek,
4750 .release = tracing_release,
4753 static const struct file_operations show_traces_fops = {
4754 .open = show_traces_open,
4756 .llseek = seq_lseek,
4757 .release = show_traces_release,
4761 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4762 size_t count, loff_t *ppos)
4764 struct trace_array *tr = file_inode(filp)->i_private;
4768 len = snprintf(NULL, 0, "%*pb\n",
4769 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4770 mask_str = kmalloc(len, GFP_KERNEL);
4774 len = snprintf(mask_str, len, "%*pb\n",
4775 cpumask_pr_args(tr->tracing_cpumask));
4780 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4788 int tracing_set_cpumask(struct trace_array *tr,
4789 cpumask_var_t tracing_cpumask_new)
4796 local_irq_disable();
4797 arch_spin_lock(&tr->max_lock);
4798 for_each_tracing_cpu(cpu) {
4800 * Increase/decrease the disabled counter if we are
4801 * about to flip a bit in the cpumask:
4803 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4804 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4805 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4806 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4808 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4809 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4810 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4811 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4814 arch_spin_unlock(&tr->max_lock);
4817 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4823 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4824 size_t count, loff_t *ppos)
4826 struct trace_array *tr = file_inode(filp)->i_private;
4827 cpumask_var_t tracing_cpumask_new;
4830 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4833 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4837 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4841 free_cpumask_var(tracing_cpumask_new);
4846 free_cpumask_var(tracing_cpumask_new);
4851 static const struct file_operations tracing_cpumask_fops = {
4852 .open = tracing_open_generic_tr,
4853 .read = tracing_cpumask_read,
4854 .write = tracing_cpumask_write,
4855 .release = tracing_release_generic_tr,
4856 .llseek = generic_file_llseek,
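/*
 * The tracing_cpumask file above takes a hex CPU mask; tracing_set_cpumask()
 * then stops per-CPU recording for any CPU whose bit was cleared. A minimal
 * user-space sketch (assuming the usual tracefs mount at /sys/kernel/tracing):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int limit_tracing_to_cpus_0_and_1(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, "3\n", 2);		hex mask 0x3 = CPUs 0 and 1
 *		close(fd);
 *		return 0;
 *	}
 */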
4859 static int tracing_trace_options_show(struct seq_file *m, void *v)
4861 struct tracer_opt *trace_opts;
4862 struct trace_array *tr = m->private;
4866 mutex_lock(&trace_types_lock);
4867 tracer_flags = tr->current_trace->flags->val;
4868 trace_opts = tr->current_trace->flags->opts;
4870 for (i = 0; trace_options[i]; i++) {
4871 if (tr->trace_flags & (1 << i))
4872 seq_printf(m, "%s\n", trace_options[i]);
4874 seq_printf(m, "no%s\n", trace_options[i]);
4877 for (i = 0; trace_opts[i].name; i++) {
4878 if (tracer_flags & trace_opts[i].bit)
4879 seq_printf(m, "%s\n", trace_opts[i].name);
4881 seq_printf(m, "no%s\n", trace_opts[i].name);
4883 mutex_unlock(&trace_types_lock);
4888 static int __set_tracer_option(struct trace_array *tr,
4889 struct tracer_flags *tracer_flags,
4890 struct tracer_opt *opts, int neg)
4892 struct tracer *trace = tracer_flags->trace;
4895 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4900 tracer_flags->val &= ~opts->bit;
4902 tracer_flags->val |= opts->bit;
4906 /* Try to assign a tracer specific option */
4907 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4909 struct tracer *trace = tr->current_trace;
4910 struct tracer_flags *tracer_flags = trace->flags;
4911 struct tracer_opt *opts = NULL;
4914 for (i = 0; tracer_flags->opts[i].name; i++) {
4915 opts = &tracer_flags->opts[i];
4917 if (strcmp(cmp, opts->name) == 0)
4918 return __set_tracer_option(tr, trace->flags, opts, neg);
4924 /* Some tracers require overwrite to stay enabled */
4925 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4927 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4933 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4935 if ((mask == TRACE_ITER_RECORD_TGID) ||
4936 (mask == TRACE_ITER_RECORD_CMD))
4937 lockdep_assert_held(&event_mutex);
4939 /* do nothing if flag is already set */
4940 if (!!(tr->trace_flags & mask) == !!enabled)
4943 /* Give the tracer a chance to approve the change */
4944 if (tr->current_trace->flag_changed)
4945 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4949 tr->trace_flags |= mask;
4951 tr->trace_flags &= ~mask;
4953 if (mask == TRACE_ITER_RECORD_CMD)
4954 trace_event_enable_cmd_record(enabled);
4956 if (mask == TRACE_ITER_RECORD_TGID) {
4958 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4962 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4966 trace_event_enable_tgid_record(enabled);
4969 if (mask == TRACE_ITER_EVENT_FORK)
4970 trace_event_follow_fork(tr, enabled);
4972 if (mask == TRACE_ITER_FUNC_FORK)
4973 ftrace_pid_follow_fork(tr, enabled);
4975 if (mask == TRACE_ITER_OVERWRITE) {
4976 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4977 #ifdef CONFIG_TRACER_MAX_TRACE
4978 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4982 if (mask == TRACE_ITER_PRINTK) {
4983 trace_printk_start_stop_comm(enabled);
4984 trace_printk_control(enabled);
4990 int trace_set_options(struct trace_array *tr, char *option)
4995 size_t orig_len = strlen(option);
4998 cmp = strstrip(option);
5000 len = str_has_prefix(cmp, "no");
5006 mutex_lock(&event_mutex);
5007 mutex_lock(&trace_types_lock);
5009 ret = match_string(trace_options, -1, cmp);
5010 /* If no option could be set, test the specific tracer options */
5012 ret = set_tracer_option(tr, cmp, neg);
5014 ret = set_tracer_flag(tr, 1 << ret, !neg);
5016 mutex_unlock(&trace_types_lock);
5017 mutex_unlock(&event_mutex);
5020 * If the first trailing whitespace is replaced with '\0' by strstrip,
5021 * turn it back into a space.
5023 if (orig_len > strlen(option))
5024 option[strlen(option)] = ' ';
5029 static void __init apply_trace_boot_options(void)
5031 char *buf = trace_boot_options_buf;
5035 option = strsep(&buf, ",");
5041 trace_set_options(&global_trace, option);
5043 /* Put back the comma to allow this to be called again */
5050 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5051 size_t cnt, loff_t *ppos)
5053 struct seq_file *m = filp->private_data;
5054 struct trace_array *tr = m->private;
5058 if (cnt >= sizeof(buf))
5061 if (copy_from_user(buf, ubuf, cnt))
5066 ret = trace_set_options(tr, buf);
5075 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5077 struct trace_array *tr = inode->i_private;
5080 ret = tracing_check_open_get_tr(tr);
5084 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5086 trace_array_put(tr);
5091 static const struct file_operations tracing_iter_fops = {
5092 .open = tracing_trace_options_open,
5094 .llseek = seq_lseek,
5095 .release = tracing_single_release_tr,
5096 .write = tracing_trace_options_write,
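/*
 * Example usage of the trace_options interface above, which the README text
 * below also documents: writing an option name sets it and writing the same
 * name prefixed with "no" clears it, e.g. "echo sym-offset > trace_options"
 * followed by "echo nosym-offset > trace_options". Options that are not in
 * the global trace_options[] table are passed on to set_tracer_option() for
 * the current tracer.
 */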
5099 static const char readme_msg[] =
5100 "tracing mini-HOWTO:\n\n"
5101 "# echo 0 > tracing_on : quick way to disable tracing\n"
5102 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5103 " Important files:\n"
5104 " trace\t\t\t- The static contents of the buffer\n"
5105 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5106 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5107 " current_tracer\t- function and latency tracers\n"
5108 " available_tracers\t- list of configured tracers for current_tracer\n"
5109 " error_log\t- error log for failed commands (that support it)\n"
5110 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5111 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5112 " trace_clock\t\t-change the clock used to order events\n"
5113 " local: Per cpu clock but may not be synced across CPUs\n"
5114 " global: Synced across CPUs but slows tracing down.\n"
5115 " counter: Not a clock, but just an increment\n"
5116 " uptime: Jiffy counter from time of boot\n"
5117 " perf: Same clock that perf events use\n"
5118 #ifdef CONFIG_X86_64
5119 " x86-tsc: TSC cycle counter\n"
5121 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5122 " delta: Delta difference against a buffer-wide timestamp\n"
5123 " absolute: Absolute (standalone) timestamp\n"
5124 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5125 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5126 " tracing_cpumask\t- Limit which CPUs to trace\n"
5127 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5128 "\t\t\t Remove sub-buffer with rmdir\n"
5129 " trace_options\t\t- Set format or modify how tracing happens\n"
5130 "\t\t\t Disable an option by prefixing 'no' to the\n"
5131 "\t\t\t option name\n"
5132 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5133 #ifdef CONFIG_DYNAMIC_FTRACE
5134 "\n available_filter_functions - list of functions that can be filtered on\n"
5135 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5136 "\t\t\t functions\n"
5137 "\t accepts: func_full_name or glob-matching-pattern\n"
5138 "\t modules: Can select a group via module\n"
5139 "\t Format: :mod:<module-name>\n"
5140 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5141 "\t triggers: a command to perform when function is hit\n"
5142 "\t Format: <function>:<trigger>[:count]\n"
5143 "\t trigger: traceon, traceoff\n"
5144 "\t\t enable_event:<system>:<event>\n"
5145 "\t\t disable_event:<system>:<event>\n"
5146 #ifdef CONFIG_STACKTRACE
5149 #ifdef CONFIG_TRACER_SNAPSHOT
5154 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5155 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5156 "\t The first one will disable tracing every time do_fault is hit\n"
5157 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5158 "\t The first time do trap is hit and it disables tracing, the\n"
5159 "\t counter will decrement to 2. If tracing is already disabled,\n"
5160 "\t the counter will not decrement. It only decrements when the\n"
5161 "\t trigger did work\n"
5162 "\t To remove trigger without count:\n"
5163 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5164 "\t To remove trigger with a count:\n"
5165 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5166 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5167 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5168 "\t modules: Can select a group via module command :mod:\n"
5169 "\t Does not accept triggers\n"
5170 #endif /* CONFIG_DYNAMIC_FTRACE */
5171 #ifdef CONFIG_FUNCTION_TRACER
5172 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5174 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5177 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5178 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5179 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5180 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5182 #ifdef CONFIG_TRACER_SNAPSHOT
5183 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5184 "\t\t\t snapshot buffer. Read the contents for more\n"
5185 "\t\t\t information\n"
5187 #ifdef CONFIG_STACK_TRACER
5188 " stack_trace\t\t- Shows the max stack trace when active\n"
5189 " stack_max_size\t- Shows current max stack size that was traced\n"
5190 "\t\t\t Write into this file to reset the max size (trigger a\n"
5191 "\t\t\t new trace)\n"
5192 #ifdef CONFIG_DYNAMIC_FTRACE
5193 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5196 #endif /* CONFIG_STACK_TRACER */
5197 #ifdef CONFIG_DYNAMIC_EVENTS
5198 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5199 "\t\t\t Write into this file to define/undefine new trace events.\n"
5201 #ifdef CONFIG_KPROBE_EVENTS
5202 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5203 "\t\t\t Write into this file to define/undefine new trace events.\n"
5205 #ifdef CONFIG_UPROBE_EVENTS
5206 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5207 "\t\t\t Write into this file to define/undefine new trace events.\n"
5209 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5210 "\t accepts: event-definitions (one definition per line)\n"
5211 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5212 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5213 #ifdef CONFIG_HIST_TRIGGERS
5214 "\t s:[synthetic/]<event> <field> [<field>]\n"
5216 "\t -:[<group>/]<event>\n"
5217 #ifdef CONFIG_KPROBE_EVENTS
5218 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5219 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5221 #ifdef CONFIG_UPROBE_EVENTS
5222 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5224 "\t args: <name>=fetcharg[:type]\n"
5225 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5226 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5227 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5229 "\t $stack<index>, $stack, $retval, $comm,\n"
5231 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5232 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5233 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5234 "\t <type>\\[<array-size>\\]\n"
5235 #ifdef CONFIG_HIST_TRIGGERS
5236 "\t field: <stype> <name>;\n"
5237 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5238 "\t [unsigned] char/int/long\n"
5241 " events/\t\t- Directory containing all trace event subsystems:\n"
5242 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5243 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5244 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5246 " filter\t\t- If set, only events passing filter are traced\n"
5247 " events/<system>/<event>/\t- Directory containing control files for\n"
5249 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5250 " filter\t\t- If set, only events passing filter are traced\n"
5251 " trigger\t\t- If set, a command to perform when event is hit\n"
5252 "\t Format: <trigger>[:count][if <filter>]\n"
5253 "\t trigger: traceon, traceoff\n"
5254 "\t enable_event:<system>:<event>\n"
5255 "\t disable_event:<system>:<event>\n"
5256 #ifdef CONFIG_HIST_TRIGGERS
5257 "\t enable_hist:<system>:<event>\n"
5258 "\t disable_hist:<system>:<event>\n"
5260 #ifdef CONFIG_STACKTRACE
5263 #ifdef CONFIG_TRACER_SNAPSHOT
5266 #ifdef CONFIG_HIST_TRIGGERS
5267 "\t\t hist (see below)\n"
5269 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5270 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5271 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5272 "\t events/block/block_unplug/trigger\n"
5273 "\t The first disables tracing every time block_unplug is hit.\n"
5274 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5275 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5276 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5277 "\t Like function triggers, the counter is only decremented if it\n"
5278 "\t enabled or disabled tracing.\n"
5279 "\t To remove a trigger without a count:\n"
5280 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5281 "\t To remove a trigger with a count:\n"
5282 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5283 "\t Filters can be ignored when removing a trigger.\n"
5284 #ifdef CONFIG_HIST_TRIGGERS
5285 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5286 "\t Format: hist:keys=<field1[,field2,...]>\n"
5287 "\t [:values=<field1[,field2,...]>]\n"
5288 "\t [:sort=<field1[,field2,...]>]\n"
5289 "\t [:size=#entries]\n"
5290 "\t [:pause][:continue][:clear]\n"
5291 "\t [:name=histname1]\n"
5292 "\t [:<handler>.<action>]\n"
5293 "\t [if <filter>]\n\n"
5294 "\t When a matching event is hit, an entry is added to a hash\n"
5295 "\t table using the key(s) and value(s) named, and the value of a\n"
5296 "\t sum called 'hitcount' is incremented. Keys and values\n"
5297 "\t correspond to fields in the event's format description. Keys\n"
5298 "\t can be any field, or the special string 'stacktrace'.\n"
5299 "\t Compound keys consisting of up to two fields can be specified\n"
5300 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5301 "\t fields. Sort keys consisting of up to two fields can be\n"
5302 "\t specified using the 'sort' keyword. The sort direction can\n"
5303 "\t be modified by appending '.descending' or '.ascending' to a\n"
5304 "\t sort field. The 'size' parameter can be used to specify more\n"
5305 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5306 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5307 "\t its histogram data will be shared with other triggers of the\n"
5308 "\t same name, and trigger hits will update this common data.\n\n"
5309 "\t Reading the 'hist' file for the event will dump the hash\n"
5310 "\t table in its entirety to stdout. If there are multiple hist\n"
5311 "\t triggers attached to an event, there will be a table for each\n"
5312 "\t trigger in the output. The table displayed for a named\n"
5313 "\t trigger will be the same as any other instance having the\n"
5314 "\t same name. The default format used to display a given field\n"
5315 "\t can be modified by appending any of the following modifiers\n"
5316 "\t to the field name, as applicable:\n\n"
5317 "\t .hex display a number as a hex value\n"
5318 "\t .sym display an address as a symbol\n"
5319 "\t .sym-offset display an address as a symbol and offset\n"
5320 "\t .execname display a common_pid as a program name\n"
5321 "\t .syscall display a syscall id as a syscall name\n"
5322 "\t .log2 display log2 value rather than raw number\n"
5323 "\t .usecs display a common_timestamp in microseconds\n\n"
5324 "\t The 'pause' parameter can be used to pause an existing hist\n"
5325 "\t trigger or to start a hist trigger but not log any events\n"
5326 "\t until told to do so. 'continue' can be used to start or\n"
5327 "\t restart a paused hist trigger.\n\n"
5328 "\t The 'clear' parameter will clear the contents of a running\n"
5329 "\t hist trigger and leave its current paused/active state\n"
5331 "\t The enable_hist and disable_hist triggers can be used to\n"
5332 "\t have one event conditionally start and stop another event's\n"
5333 "\t already-attached hist trigger. The syntax is analogous to\n"
5334 "\t the enable_event and disable_event triggers.\n\n"
5335 "\t Hist trigger handlers and actions are executed whenever a\n"
5336 "\t a histogram entry is added or updated. They take the form:\n\n"
5337 "\t <handler>.<action>\n\n"
5338 "\t The available handlers are:\n\n"
5339 "\t onmatch(matching.event) - invoke on addition or update\n"
5340 "\t onmax(var) - invoke if var exceeds current max\n"
5341 "\t onchange(var) - invoke action if var changes\n\n"
5342 "\t The available actions are:\n\n"
5343 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5344 "\t save(field,...) - save current event fields\n"
5345 #ifdef CONFIG_TRACER_SNAPSHOT
5346 "\t snapshot() - snapshot the trace buffer\n\n"
5348 #ifdef CONFIG_SYNTH_EVENTS
5349 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5350 "\t Write into this file to define/undefine new synthetic events.\n"
5351 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5357 tracing_readme_read(struct file *filp, char __user *ubuf,
5358 size_t cnt, loff_t *ppos)
5360 return simple_read_from_buffer(ubuf, cnt, ppos,
5361 readme_msg, strlen(readme_msg));
5364 static const struct file_operations tracing_readme_fops = {
5365 .open = tracing_open_generic,
5366 .read = tracing_readme_read,
5367 .llseek = generic_file_llseek,
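/*
 * A minimal user-space sketch of the basic workflow the README above
 * describes, assuming the usual tracefs mount at /sys/kernel/tracing and
 * with error handling trimmed for brevity:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void write_str(const char *path, const char *s)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, s, strlen(s));
 *			close(fd);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		write_str("/sys/kernel/tracing/tracing_on", "1");
 *		write_str("/sys/kernel/tracing/trace_marker", "hello from user space");
 *		write_str("/sys/kernel/tracing/tracing_on", "0");
 *		return 0;
 *	}
 */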
5370 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5374 if (*pos || m->count)
5379 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5380 if (trace_find_tgid(*ptr))
5387 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5397 v = saved_tgids_next(m, v, &l);
5405 static void saved_tgids_stop(struct seq_file *m, void *v)
5409 static int saved_tgids_show(struct seq_file *m, void *v)
5411 int pid = (int *)v - tgid_map;
5413 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5417 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5418 .start = saved_tgids_start,
5419 .stop = saved_tgids_stop,
5420 .next = saved_tgids_next,
5421 .show = saved_tgids_show,
5424 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5428 ret = tracing_check_open_get_tr(NULL);
5432 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5436 static const struct file_operations tracing_saved_tgids_fops = {
5437 .open = tracing_saved_tgids_open,
5439 .llseek = seq_lseek,
5440 .release = seq_release,
5443 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5445 unsigned int *ptr = v;
5447 if (*pos || m->count)
5452 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5454 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5463 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5469 arch_spin_lock(&trace_cmdline_lock);
5471 v = &savedcmd->map_cmdline_to_pid[0];
5473 v = saved_cmdlines_next(m, v, &l);
5481 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5483 arch_spin_unlock(&trace_cmdline_lock);
5487 static int saved_cmdlines_show(struct seq_file *m, void *v)
5489 char buf[TASK_COMM_LEN];
5490 unsigned int *pid = v;
5492 __trace_find_cmdline(*pid, buf);
5493 seq_printf(m, "%d %s\n", *pid, buf);
5497 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5498 .start = saved_cmdlines_start,
5499 .next = saved_cmdlines_next,
5500 .stop = saved_cmdlines_stop,
5501 .show = saved_cmdlines_show,
5504 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5508 ret = tracing_check_open_get_tr(NULL);
5512 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5515 static const struct file_operations tracing_saved_cmdlines_fops = {
5516 .open = tracing_saved_cmdlines_open,
5518 .llseek = seq_lseek,
5519 .release = seq_release,
5523 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5524 size_t cnt, loff_t *ppos)
5529 arch_spin_lock(&trace_cmdline_lock);
5530 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5531 arch_spin_unlock(&trace_cmdline_lock);
5533 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5536 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5538 kfree(s->saved_cmdlines);
5539 kfree(s->map_cmdline_to_pid);
5543 static int tracing_resize_saved_cmdlines(unsigned int val)
5545 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5547 s = kmalloc(sizeof(*s), GFP_KERNEL);
5551 if (allocate_cmdlines_buffer(val, s) < 0) {
5556 arch_spin_lock(&trace_cmdline_lock);
5557 savedcmd_temp = savedcmd;
5559 arch_spin_unlock(&trace_cmdline_lock);
5560 free_saved_cmdlines_buffer(savedcmd_temp);
5566 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5567 size_t cnt, loff_t *ppos)
5572 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5576 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5577 if (!val || val > PID_MAX_DEFAULT)
5580 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5589 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5590 .open = tracing_open_generic,
5591 .read = tracing_saved_cmdlines_size_read,
5592 .write = tracing_saved_cmdlines_size_write,
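/*
 * As the README notes, the number of saved comm<->pid entries can be grown
 * from user space, e.g. "echo 1024 > saved_cmdlines_size"; the write handler
 * above rejects 0 and anything above PID_MAX_DEFAULT.
 */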
5595 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5596 static union trace_eval_map_item *
5597 update_eval_map(union trace_eval_map_item *ptr)
5599 if (!ptr->map.eval_string) {
5600 if (ptr->tail.next) {
5601 ptr = ptr->tail.next;
5602 /* Set ptr to the next real item (skip head) */
5610 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5612 union trace_eval_map_item *ptr = v;
5615 * Paranoid! If ptr points to end, we don't want to increment past it.
5616 * This really should never happen.
5619 ptr = update_eval_map(ptr);
5620 if (WARN_ON_ONCE(!ptr))
5624 ptr = update_eval_map(ptr);
5629 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5631 union trace_eval_map_item *v;
5634 mutex_lock(&trace_eval_mutex);
5636 v = trace_eval_maps;
5640 while (v && l < *pos) {
5641 v = eval_map_next(m, v, &l);
5647 static void eval_map_stop(struct seq_file *m, void *v)
5649 mutex_unlock(&trace_eval_mutex);
5652 static int eval_map_show(struct seq_file *m, void *v)
5654 union trace_eval_map_item *ptr = v;
5656 seq_printf(m, "%s %ld (%s)\n",
5657 ptr->map.eval_string, ptr->map.eval_value,
5663 static const struct seq_operations tracing_eval_map_seq_ops = {
5664 .start = eval_map_start,
5665 .next = eval_map_next,
5666 .stop = eval_map_stop,
5667 .show = eval_map_show,
5670 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5674 ret = tracing_check_open_get_tr(NULL);
5678 return seq_open(filp, &tracing_eval_map_seq_ops);
5681 static const struct file_operations tracing_eval_map_fops = {
5682 .open = tracing_eval_map_open,
5684 .llseek = seq_lseek,
5685 .release = seq_release,
5688 static inline union trace_eval_map_item *
5689 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5691 /* Return tail of array given the head */
5692 return ptr + ptr->head.length + 1;
5696 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5699 struct trace_eval_map **stop;
5700 struct trace_eval_map **map;
5701 union trace_eval_map_item *map_array;
5702 union trace_eval_map_item *ptr;
5707 * The trace_eval_maps contains the map plus a head and tail item,
5708 * where the head holds the module and length of array, and the
5709 * tail holds a pointer to the next list.
5711 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5713 pr_warn("Unable to allocate trace eval mapping\n");
5717 mutex_lock(&trace_eval_mutex);
5719 if (!trace_eval_maps)
5720 trace_eval_maps = map_array;
5722 ptr = trace_eval_maps;
5724 ptr = trace_eval_jmp_to_tail(ptr);
5725 if (!ptr->tail.next)
5727 ptr = ptr->tail.next;
5730 ptr->tail.next = map_array;
5732 map_array->head.mod = mod;
5733 map_array->head.length = len;
5736 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5737 map_array->map = **map;
5740 memset(map_array, 0, sizeof(*map_array));
5742 mutex_unlock(&trace_eval_mutex);
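/*
 * Layout of one map_array chunk built above (head and tail are the two extra
 * items accounted for by kmalloc_array(len + 2, ...)):
 *
 *	[ head: mod, length ][ map 0 ][ map 1 ] ... [ map len-1 ][ tail: next ]
 *
 * The seq_file iterator skips head/tail items via update_eval_map(), while
 * trace_eval_jmp_to_tail() jumps from a chunk's head to its tail so a new
 * chunk can be linked in through tail.next.
 */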
5745 static void trace_create_eval_file(struct dentry *d_tracer)
5747 trace_create_file("eval_map", 0444, d_tracer,
5748 NULL, &tracing_eval_map_fops);
5751 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5752 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5753 static inline void trace_insert_eval_map_file(struct module *mod,
5754 struct trace_eval_map **start, int len) { }
5755 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5757 static void trace_insert_eval_map(struct module *mod,
5758 struct trace_eval_map **start, int len)
5760 struct trace_eval_map **map;
5767 trace_event_eval_update(map, len);
5769 trace_insert_eval_map_file(mod, start, len);
5773 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5774 size_t cnt, loff_t *ppos)
5776 struct trace_array *tr = filp->private_data;
5777 char buf[MAX_TRACER_SIZE+2];
5780 mutex_lock(&trace_types_lock);
5781 r = sprintf(buf, "%s\n", tr->current_trace->name);
5782 mutex_unlock(&trace_types_lock);
5784 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5787 int tracer_init(struct tracer *t, struct trace_array *tr)
5789 tracing_reset_online_cpus(&tr->array_buffer);
5793 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5797 for_each_tracing_cpu(cpu)
5798 per_cpu_ptr(buf->data, cpu)->entries = val;
5801 #ifdef CONFIG_TRACER_MAX_TRACE
5802 /* resize @tr's buffer to the size of @size_tr's entries */
5803 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5804 struct array_buffer *size_buf, int cpu_id)
5808 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5809 for_each_tracing_cpu(cpu) {
5810 ret = ring_buffer_resize(trace_buf->buffer,
5811 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5814 per_cpu_ptr(trace_buf->data, cpu)->entries =
5815 per_cpu_ptr(size_buf->data, cpu)->entries;
5818 ret = ring_buffer_resize(trace_buf->buffer,
5819 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5821 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5822 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5827 #endif /* CONFIG_TRACER_MAX_TRACE */
5829 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5830 unsigned long size, int cpu)
5835 * If kernel or user changes the size of the ring buffer
5836 * we use the size that was given, and we can forget about
5837 * expanding it later.
5839 ring_buffer_expanded = true;
5841 /* May be called before buffers are initialized */
5842 if (!tr->array_buffer.buffer)
5845 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5849 #ifdef CONFIG_TRACER_MAX_TRACE
5850 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5851 !tr->current_trace->use_max_tr)
5854 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5856 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5857 &tr->array_buffer, cpu);
5860 * AARGH! We are left with a different
5861 * size max buffer!!!!
5862 * The max buffer is our "snapshot" buffer.
5863 * When a tracer needs a snapshot (one of the
5864 * latency tracers), it swaps the max buffer
5865 * with the saved snapshot. We succeeded in
5866 * updating the size of the main buffer, but failed to
5867 * update the size of the max buffer. But when we tried
5868 * to reset the main buffer to the original size, we
5869 * failed there too. This is very unlikely to
5870 * happen, but if it does, warn and kill all
5874 tracing_disabled = 1;
5879 if (cpu == RING_BUFFER_ALL_CPUS)
5880 set_buffer_entries(&tr->max_buffer, size);
5882 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5885 #endif /* CONFIG_TRACER_MAX_TRACE */
5887 if (cpu == RING_BUFFER_ALL_CPUS)
5888 set_buffer_entries(&tr->array_buffer, size);
5890 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5895 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5896 unsigned long size, int cpu_id)
5900 mutex_lock(&trace_types_lock);
5902 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5903 /* make sure, this cpu is enabled in the mask */
5904 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5910 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5915 mutex_unlock(&trace_types_lock);
5922 * tracing_update_buffers - used by tracing facility to expand ring buffers
5924 * To save memory when tracing is never used on a system that has it
5925 * configured in, the ring buffers are set to a minimum size. Once
5926 * a user starts to use the tracing facility, they need to grow
5927 * to their default size.
5929 * This function is to be called when a tracer is about to be used.
5931 int tracing_update_buffers(void)
5935 mutex_lock(&trace_types_lock);
5936 if (!ring_buffer_expanded)
5937 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5938 RING_BUFFER_ALL_CPUS);
5939 mutex_unlock(&trace_types_lock);
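/*
 * A typical call-site sketch for the helper above: code that is about to
 * start generating trace data first makes sure the ring buffer has been
 * expanded to its real size, roughly:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */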
5944 struct trace_option_dentry;
5947 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5950 * Used to clear out the tracer before deletion of an instance.
5951 * Must have trace_types_lock held.
5953 static void tracing_set_nop(struct trace_array *tr)
5955 if (tr->current_trace == &nop_trace)
5958 tr->current_trace->enabled--;
5960 if (tr->current_trace->reset)
5961 tr->current_trace->reset(tr);
5963 tr->current_trace = &nop_trace;
5966 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5968 /* Only enable if the directory has been created already. */
5972 create_trace_option_files(tr, t);
5975 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5978 #ifdef CONFIG_TRACER_MAX_TRACE
5983 mutex_lock(&trace_types_lock);
5985 if (!ring_buffer_expanded) {
5986 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5987 RING_BUFFER_ALL_CPUS);
5993 for (t = trace_types; t; t = t->next) {
5994 if (strcmp(t->name, buf) == 0)
6001 if (t == tr->current_trace)
6004 #ifdef CONFIG_TRACER_SNAPSHOT
6005 if (t->use_max_tr) {
6006 arch_spin_lock(&tr->max_lock);
6007 if (tr->cond_snapshot)
6009 arch_spin_unlock(&tr->max_lock);
6014 /* Some tracers won't work on kernel command line */
6015 if (system_state < SYSTEM_RUNNING && t->noboot) {
6016 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6021 /* Some tracers are only allowed for the top level buffer */
6022 if (!trace_ok_for_array(t, tr)) {
6027 /* If trace pipe files are being read, we can't change the tracer */
6028 if (tr->trace_ref) {
6033 trace_branch_disable();
6035 tr->current_trace->enabled--;
6037 if (tr->current_trace->reset)
6038 tr->current_trace->reset(tr);
6040 /* Current trace needs to be nop_trace before synchronize_rcu */
6041 tr->current_trace = &nop_trace;
6043 #ifdef CONFIG_TRACER_MAX_TRACE
6044 had_max_tr = tr->allocated_snapshot;
6046 if (had_max_tr && !t->use_max_tr) {
6048 * We need to make sure that the update_max_tr sees that
6049 * current_trace changed to nop_trace to keep it from
6050 * swapping the buffers after we resize it.
6051 * The update_max_tr is called with interrupts disabled,
6052 * so an RCU synchronization is sufficient.
6059 #ifdef CONFIG_TRACER_MAX_TRACE
6060 if (t->use_max_tr && !had_max_tr) {
6061 ret = tracing_alloc_snapshot_instance(tr);
6068 ret = tracer_init(t, tr);
6073 tr->current_trace = t;
6074 tr->current_trace->enabled++;
6075 trace_branch_enable(tr);
6077 mutex_unlock(&trace_types_lock);
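/*
 * tracing_set_tracer() backs the current_tracer file, so the usual way to
 * drive it from user space is simply "echo function > current_tracer" (or
 * "echo nop > current_tracer" to drop back to the no-op tracer); the name
 * written is matched against the registered trace_types list above.
 */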
6083 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6084 size_t cnt, loff_t *ppos)
6086 struct trace_array *tr = filp->private_data;
6087 char buf[MAX_TRACER_SIZE+1];
6094 if (cnt > MAX_TRACER_SIZE)
6095 cnt = MAX_TRACER_SIZE;
6097 if (copy_from_user(buf, ubuf, cnt))
6102 /* strip trailing whitespace */
6103 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6106 err = tracing_set_tracer(tr, buf);
6116 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6117 size_t cnt, loff_t *ppos)
6122 r = snprintf(buf, sizeof(buf), "%ld\n",
6123 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6124 if (r > sizeof(buf))
6126 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6130 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6131 size_t cnt, loff_t *ppos)
6136 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6146 tracing_thresh_read(struct file *filp, char __user *ubuf,
6147 size_t cnt, loff_t *ppos)
6149 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6153 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6154 size_t cnt, loff_t *ppos)
6156 struct trace_array *tr = filp->private_data;
6159 mutex_lock(&trace_types_lock);
6160 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6164 if (tr->current_trace->update_thresh) {
6165 ret = tr->current_trace->update_thresh(tr);
6172 mutex_unlock(&trace_types_lock);
6177 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6180 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6181 size_t cnt, loff_t *ppos)
6183 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6187 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6188 size_t cnt, loff_t *ppos)
6190 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6195 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6197 struct trace_array *tr = inode->i_private;
6198 struct trace_iterator *iter;
6201 ret = tracing_check_open_get_tr(tr);
6205 mutex_lock(&trace_types_lock);
6207 /* create a buffer to store the information to pass to userspace */
6208 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6211 __trace_array_put(tr);
6215 trace_seq_init(&iter->seq);
6216 iter->trace = tr->current_trace;
6218 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6223 /* trace pipe does not show start of buffer */
6224 cpumask_setall(iter->started);
6226 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6227 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6229 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6230 if (trace_clocks[tr->clock_id].in_ns)
6231 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6234 iter->array_buffer = &tr->array_buffer;
6235 iter->cpu_file = tracing_get_cpu(inode);
6236 mutex_init(&iter->mutex);
6237 filp->private_data = iter;
6239 if (iter->trace->pipe_open)
6240 iter->trace->pipe_open(iter);
6242 nonseekable_open(inode, filp);
6246 mutex_unlock(&trace_types_lock);
6251 __trace_array_put(tr);
6252 mutex_unlock(&trace_types_lock);
6256 static int tracing_release_pipe(struct inode *inode, struct file *file)
6258 struct trace_iterator *iter = file->private_data;
6259 struct trace_array *tr = inode->i_private;
6261 mutex_lock(&trace_types_lock);
6265 if (iter->trace->pipe_close)
6266 iter->trace->pipe_close(iter);
6268 mutex_unlock(&trace_types_lock);
6270 free_cpumask_var(iter->started);
6271 mutex_destroy(&iter->mutex);
6274 trace_array_put(tr);
6280 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6282 struct trace_array *tr = iter->tr;
6284 /* Iterators are static, they should be filled or empty */
6285 if (trace_buffer_iter(iter, iter->cpu_file))
6286 return EPOLLIN | EPOLLRDNORM;
6288 if (tr->trace_flags & TRACE_ITER_BLOCK)
6290 * Always select as readable when in blocking mode
6292 return EPOLLIN | EPOLLRDNORM;
6294 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6299 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6301 struct trace_iterator *iter = filp->private_data;
6303 return trace_poll(iter, filp, poll_table);
6306 /* Must be called with iter->mutex held. */
6307 static int tracing_wait_pipe(struct file *filp)
6309 struct trace_iterator *iter = filp->private_data;
6312 while (trace_empty(iter)) {
6314 if ((filp->f_flags & O_NONBLOCK)) {
6319 * We block until we read something or until tracing is disabled.
6320 * We still block if tracing is disabled but we have never
6321 * read anything. This allows a user to cat this file, and
6322 * then enable tracing. But after we have read something,
6323 * we give an EOF when tracing is again disabled.
6325 * iter->pos will be 0 if we haven't read anything.
6327 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6330 mutex_unlock(&iter->mutex);
6332 ret = wait_on_pipe(iter, 0);
6334 mutex_lock(&iter->mutex);
6347 tracing_read_pipe(struct file *filp, char __user *ubuf,
6348 size_t cnt, loff_t *ppos)
6350 struct trace_iterator *iter = filp->private_data;
6354 * Avoid more than one consumer on a single file descriptor.
6355 * This is just a matter of trace coherency; the ring buffer itself
6358 mutex_lock(&iter->mutex);
6360 /* return any leftover data */
6361 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6365 trace_seq_init(&iter->seq);
6367 if (iter->trace->read) {
6368 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6374 sret = tracing_wait_pipe(filp);
6378 /* stop when tracing is finished */
6379 if (trace_empty(iter)) {
6384 if (cnt >= PAGE_SIZE)
6385 cnt = PAGE_SIZE - 1;
6387 /* reset all but tr, trace, and overruns */
6388 memset(&iter->seq, 0,
6389 sizeof(struct trace_iterator) -
6390 offsetof(struct trace_iterator, seq));
6391 cpumask_clear(iter->started);
6392 trace_seq_init(&iter->seq);
6395 trace_event_read_lock();
6396 trace_access_lock(iter->cpu_file);
6397 while (trace_find_next_entry_inc(iter) != NULL) {
6398 enum print_line_t ret;
6399 int save_len = iter->seq.seq.len;
6401 ret = print_trace_line(iter);
6402 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6403 /* don't print partial lines */
6404 iter->seq.seq.len = save_len;
6407 if (ret != TRACE_TYPE_NO_CONSUME)
6408 trace_consume(iter);
6410 if (trace_seq_used(&iter->seq) >= cnt)
6414 * Setting the full flag means we reached the trace_seq buffer
6415 * size, and we should have left via the partial-output condition above.
6416 * If we get here, one of the trace_seq_* functions is not being used properly.
6418 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6421 trace_access_unlock(iter->cpu_file);
6422 trace_event_read_unlock();
6424 /* Now copy what we have to the user */
6425 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6426 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6427 trace_seq_init(&iter->seq);
6430 * If there was nothing to send to user, in spite of consuming trace
6431 * entries, go back to wait for more entries.
6437 mutex_unlock(&iter->mutex);
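/*
 * A minimal user-space sketch of the consuming read implemented above: a
 * read of trace_pipe blocks until data is available unless the file was
 * opened O_NONBLOCK (the path assumes the usual tracefs mount point):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int dump_trace_pipe(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */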
6442 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6445 __free_page(spd->pages[idx]);
6449 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6455 /* Seq buffer is page-sized, exactly what we need. */
6457 save_len = iter->seq.seq.len;
6458 ret = print_trace_line(iter);
6460 if (trace_seq_has_overflowed(&iter->seq)) {
6461 iter->seq.seq.len = save_len;
6466 * This should not be hit, because it should only
6467 * be set if the iter->seq overflowed. But check it
6468 * anyway to be safe.
6470 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6471 iter->seq.seq.len = save_len;
6475 count = trace_seq_used(&iter->seq) - save_len;
6478 iter->seq.seq.len = save_len;
6482 if (ret != TRACE_TYPE_NO_CONSUME)
6483 trace_consume(iter);
6485 if (!trace_find_next_entry_inc(iter)) {
6495 static ssize_t tracing_splice_read_pipe(struct file *filp,
6497 struct pipe_inode_info *pipe,
6501 struct page *pages_def[PIPE_DEF_BUFFERS];
6502 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6503 struct trace_iterator *iter = filp->private_data;
6504 struct splice_pipe_desc spd = {
6506 .partial = partial_def,
6507 .nr_pages = 0, /* This gets updated below. */
6508 .nr_pages_max = PIPE_DEF_BUFFERS,
6509 .ops = &default_pipe_buf_ops,
6510 .spd_release = tracing_spd_release_pipe,
6516 if (splice_grow_spd(pipe, &spd))
6519 mutex_lock(&iter->mutex);
6521 if (iter->trace->splice_read) {
6522 ret = iter->trace->splice_read(iter, filp,
6523 ppos, pipe, len, flags);
6528 ret = tracing_wait_pipe(filp);
6532 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6537 trace_event_read_lock();
6538 trace_access_lock(iter->cpu_file);
6540 /* Fill as many pages as possible. */
6541 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6542 spd.pages[i] = alloc_page(GFP_KERNEL);
6546 rem = tracing_fill_pipe_page(rem, iter);
6548 /* Copy the data into the page, so we can start over. */
6549 ret = trace_seq_to_buffer(&iter->seq,
6550 page_address(spd.pages[i]),
6551 trace_seq_used(&iter->seq));
6553 __free_page(spd.pages[i]);
6556 spd.partial[i].offset = 0;
6557 spd.partial[i].len = trace_seq_used(&iter->seq);
6559 trace_seq_init(&iter->seq);
6562 trace_access_unlock(iter->cpu_file);
6563 trace_event_read_unlock();
6564 mutex_unlock(&iter->mutex);
6569 ret = splice_to_pipe(pipe, &spd);
6573 splice_shrink_spd(&spd);
6577 mutex_unlock(&iter->mutex);
6582 tracing_entries_read(struct file *filp, char __user *ubuf,
6583 size_t cnt, loff_t *ppos)
6585 struct inode *inode = file_inode(filp);
6586 struct trace_array *tr = inode->i_private;
6587 int cpu = tracing_get_cpu(inode);
6592 mutex_lock(&trace_types_lock);
6594 if (cpu == RING_BUFFER_ALL_CPUS) {
6595 int cpu, buf_size_same;
6600 /* check if all cpu sizes are same */
6601 for_each_tracing_cpu(cpu) {
6602 /* fill in the size from first enabled cpu */
6604 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6605 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6611 if (buf_size_same) {
6612 if (!ring_buffer_expanded)
6613 r = sprintf(buf, "%lu (expanded: %lu)\n",
6615 trace_buf_size >> 10);
6617 r = sprintf(buf, "%lu\n", size >> 10);
6619 r = sprintf(buf, "X\n");
6621 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6623 mutex_unlock(&trace_types_lock);
6625 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6630 tracing_entries_write(struct file *filp, const char __user *ubuf,
6631 size_t cnt, loff_t *ppos)
6633 struct inode *inode = file_inode(filp);
6634 struct trace_array *tr = inode->i_private;
6638 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6642 /* must have at least 1 entry */
6646 /* value is in KB */
6648 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6658 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6659 size_t cnt, loff_t *ppos)
6661 struct trace_array *tr = filp->private_data;
6664 unsigned long size = 0, expanded_size = 0;
6666 mutex_lock(&trace_types_lock);
6667 for_each_tracing_cpu(cpu) {
6668 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6669 if (!ring_buffer_expanded)
6670 expanded_size += trace_buf_size >> 10;
6672 if (ring_buffer_expanded)
6673 r = sprintf(buf, "%lu\n", size);
6675 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6676 mutex_unlock(&trace_types_lock);
6678 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
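/*
 * Both files above are sized in kilobytes: buffer_size_kb reads and resizes
 * the per-CPU buffer (e.g. "echo 2048 > buffer_size_kb" requests 2MB per
 * CPU and goes through tracing_resize_ring_buffer()), while
 * buffer_total_size_kb is a read-only sum across all CPUs.
 */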
6682 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6683 size_t cnt, loff_t *ppos)
6686 * There is no need to read what the user has written; this function
6687 * is just to make sure that there is no error when "echo" is used
6696 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6698 struct trace_array *tr = inode->i_private;
6700 /* disable tracing ? */
6701 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6702 tracer_tracing_off(tr);
6703 /* resize the ring buffer to 0 */
6704 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6706 trace_array_put(tr);
6712 tracing_mark_write(struct file *filp, const char __user *ubuf,
6713 size_t cnt, loff_t *fpos)
6715 struct trace_array *tr = filp->private_data;
6716 struct ring_buffer_event *event;
6717 enum event_trigger_type tt = ETT_NONE;
6718 struct trace_buffer *buffer;
6719 struct print_entry *entry;
6724 /* Used in tracing_mark_raw_write() as well */
6725 #define FAULTED_STR "<faulted>"
6726 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6728 if (tracing_disabled)
6731 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6734 if (cnt > TRACE_BUF_SIZE)
6735 cnt = TRACE_BUF_SIZE;
6737 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6739 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6741 /* If less than "<faulted>", then make sure we can still add that */
6742 if (cnt < FAULTED_SIZE)
6743 size += FAULTED_SIZE - cnt;
6745 buffer = tr->array_buffer.buffer;
6746 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6748 if (unlikely(!event))
6749 /* Ring buffer disabled, return as if not open for write */
6752 entry = ring_buffer_event_data(event);
6753 entry->ip = _THIS_IP_;
6755 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6757 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6763 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6764 /* do not add \n before testing triggers, but add \0 */
6765 entry->buf[cnt] = '\0';
6766 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6769 if (entry->buf[cnt - 1] != '\n') {
6770 entry->buf[cnt] = '\n';
6771 entry->buf[cnt + 1] = '\0';
6773 entry->buf[cnt] = '\0';
6775 if (static_branch_unlikely(&trace_marker_exports_enabled))
6776 ftrace_exports(event, TRACE_EXPORT_MARKER);
6777 __buffer_unlock_commit(buffer, event);
6780 event_triggers_post_call(tr->trace_marker_file, tt);
6788 /* Limit it for now to 3K (including tag) */
6789 #define RAW_DATA_MAX_SIZE (1024*3)
6792 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6793 size_t cnt, loff_t *fpos)
6795 struct trace_array *tr = filp->private_data;
6796 struct ring_buffer_event *event;
6797 struct trace_buffer *buffer;
6798 struct raw_data_entry *entry;
6803 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6805 if (tracing_disabled)
6808 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6811 /* The marker must at least have a tag id */
6812 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6815 if (cnt > TRACE_BUF_SIZE)
6816 cnt = TRACE_BUF_SIZE;
6818 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6820 size = sizeof(*entry) + cnt;
6821 if (cnt < FAULT_SIZE_ID)
6822 size += FAULT_SIZE_ID - cnt;
6824 buffer = tr->array_buffer.buffer;
6825 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6828 /* Ring buffer disabled, return as if not open for write */
6831 entry = ring_buffer_event_data(event);
6833 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6836 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6841 __buffer_unlock_commit(buffer, event);
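/*
 * Sketch of the binary layout tracing_mark_raw_write() expects: the first
 * sizeof(unsigned int) bytes are the tag id copied into entry->id, followed
 * by an opaque payload. Assuming fd is already open on trace_marker_raw
 * (the tag value below is purely illustrative), a writer could do:
 *
 *	struct {
 *		unsigned int id;
 *		char payload[32];
 *	} rec = { .id = 0x1234, .payload = "raw marker payload" };
 *
 *	write(fd, &rec, sizeof(rec));
 */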
6849 static int tracing_clock_show(struct seq_file *m, void *v)
6851 struct trace_array *tr = m->private;
6854 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6856 "%s%s%s%s", i ? " " : "",
6857 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6858 i == tr->clock_id ? "]" : "");
6864 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6868 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6869 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6872 if (i == ARRAY_SIZE(trace_clocks))
6875 mutex_lock(&trace_types_lock);
6879 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6882 * New clock may not be consistent with the previous clock.
6883 * Reset the buffer so that it doesn't have incomparable timestamps.
6885 tracing_reset_online_cpus(&tr->array_buffer);
6887 #ifdef CONFIG_TRACER_MAX_TRACE
6888 if (tr->max_buffer.buffer)
6889 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6890 tracing_reset_online_cpus(&tr->max_buffer);
6893 mutex_unlock(&trace_types_lock);
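/*
 * The trace clock is switched from user space by writing one of the names
 * listed by this file back into it, e.g. "echo global > trace_clock". As
 * the comment above explains, switching clocks also resets the buffers,
 * since timestamps taken with different clocks are not comparable.
 */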
6898 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6899 size_t cnt, loff_t *fpos)
6901 struct seq_file *m = filp->private_data;
6902 struct trace_array *tr = m->private;
6904 const char *clockstr;
6907 if (cnt >= sizeof(buf))
6910 if (copy_from_user(buf, ubuf, cnt))
6915 clockstr = strstrip(buf);
6917 ret = tracing_set_clock(tr, clockstr);
6926 static int tracing_clock_open(struct inode *inode, struct file *file)
6928 struct trace_array *tr = inode->i_private;
6931 ret = tracing_check_open_get_tr(tr);
6935 ret = single_open(file, tracing_clock_show, inode->i_private);
6937 trace_array_put(tr);
6942 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6944 struct trace_array *tr = m->private;
6946 mutex_lock(&trace_types_lock);
6948 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6949 seq_puts(m, "delta [absolute]\n");
6951 seq_puts(m, "[delta] absolute\n");
6953 mutex_unlock(&trace_types_lock);
6958 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6960 struct trace_array *tr = inode->i_private;
6963 ret = tracing_check_open_get_tr(tr);
6967 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6969 trace_array_put(tr);
6974 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6978 mutex_lock(&trace_types_lock);
6980 if (abs && tr->time_stamp_abs_ref++)
6984 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6989 if (--tr->time_stamp_abs_ref)
6993 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6995 #ifdef CONFIG_TRACER_MAX_TRACE
6996 if (tr->max_buffer.buffer)
6997 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
7000 mutex_unlock(&trace_types_lock);
7005 struct ftrace_buffer_info {
7006 struct trace_iterator iter;
7008 unsigned int spare_cpu;
7012 #ifdef CONFIG_TRACER_SNAPSHOT
7013 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7015 struct trace_array *tr = inode->i_private;
7016 struct trace_iterator *iter;
7020 ret = tracing_check_open_get_tr(tr);
7024 if (file->f_mode & FMODE_READ) {
7025 iter = __tracing_open(inode, file, true);
7027 ret = PTR_ERR(iter);
7029 /* Writes still need the seq_file to hold the private data */
7031 m = kzalloc(sizeof(*m), GFP_KERNEL);
7034 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7042 iter->array_buffer = &tr->max_buffer;
7043 iter->cpu_file = tracing_get_cpu(inode);
7045 file->private_data = m;
7049 trace_array_put(tr);
7055 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7058 struct seq_file *m = filp->private_data;
7059 struct trace_iterator *iter = m->private;
7060 struct trace_array *tr = iter->tr;
7064 ret = tracing_update_buffers();
7068 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7072 mutex_lock(&trace_types_lock);
7074 if (tr->current_trace->use_max_tr) {
7079 arch_spin_lock(&tr->max_lock);
7080 if (tr->cond_snapshot)
7082 arch_spin_unlock(&tr->max_lock);
7088 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7092 if (tr->allocated_snapshot)
7096 /* Only allow per-cpu swap if the ring buffer supports it */
7097 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7098 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7103 if (tr->allocated_snapshot)
7104 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7105 &tr->array_buffer, iter->cpu_file);
7107 ret = tracing_alloc_snapshot_instance(tr);
7110 local_irq_disable();
7111 /* Now, we're going to swap */
7112 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7113 update_max_tr(tr, current, smp_processor_id(), NULL);
7115 update_max_tr_single(tr, current, iter->cpu_file);
7119 if (tr->allocated_snapshot) {
7120 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7121 tracing_reset_online_cpus(&tr->max_buffer);
7123 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7133 mutex_unlock(&trace_types_lock);
7137 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7139 struct seq_file *m = file->private_data;
7142 ret = tracing_release(inode, file);
7144 if (file->f_mode & FMODE_READ)
7147 /* If write only, the seq_file is just a stub */
7155 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7156 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7157 size_t count, loff_t *ppos);
7158 static int tracing_buffers_release(struct inode *inode, struct file *file);
7159 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7160 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7162 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7164 struct ftrace_buffer_info *info;
7167 /* The following checks for tracefs lockdown */
7168 ret = tracing_buffers_open(inode, filp);
7172 info = filp->private_data;
7174 if (info->iter.trace->use_max_tr) {
7175 tracing_buffers_release(inode, filp);
7179 info->iter.snapshot = true;
7180 info->iter.array_buffer = &info->iter.tr->max_buffer;
7185 #endif /* CONFIG_TRACER_SNAPSHOT */
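/*
 * A short usage sketch for the snapshot file handled above, assuming the
 * conventional value semantics of tracing_snapshot_write(): writing 1
 * allocates the max/snapshot buffer if needed and swaps it with the live
 * buffer, writing 0 frees it again, and reading the file shows the
 * snapshotted contents, e.g. "echo 1 > snapshot" followed by "cat snapshot".
 */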
7188 static const struct file_operations tracing_thresh_fops = {
7189 .open = tracing_open_generic,
7190 .read = tracing_thresh_read,
7191 .write = tracing_thresh_write,
7192 .llseek = generic_file_llseek,
7195 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7196 static const struct file_operations tracing_max_lat_fops = {
7197 .open = tracing_open_generic,
7198 .read = tracing_max_lat_read,
7199 .write = tracing_max_lat_write,
7200 .llseek = generic_file_llseek,
7204 static const struct file_operations set_tracer_fops = {
7205 .open = tracing_open_generic,
7206 .read = tracing_set_trace_read,
7207 .write = tracing_set_trace_write,
7208 .llseek = generic_file_llseek,
7211 static const struct file_operations tracing_pipe_fops = {
7212 .open = tracing_open_pipe,
7213 .poll = tracing_poll_pipe,
7214 .read = tracing_read_pipe,
7215 .splice_read = tracing_splice_read_pipe,
7216 .release = tracing_release_pipe,
7217 .llseek = no_llseek,
7220 static const struct file_operations tracing_entries_fops = {
7221 .open = tracing_open_generic_tr,
7222 .read = tracing_entries_read,
7223 .write = tracing_entries_write,
7224 .llseek = generic_file_llseek,
7225 .release = tracing_release_generic_tr,
7228 static const struct file_operations tracing_total_entries_fops = {
7229 .open = tracing_open_generic_tr,
7230 .read = tracing_total_entries_read,
7231 .llseek = generic_file_llseek,
7232 .release = tracing_release_generic_tr,
7235 static const struct file_operations tracing_free_buffer_fops = {
7236 .open = tracing_open_generic_tr,
7237 .write = tracing_free_buffer_write,
7238 .release = tracing_free_buffer_release,
7241 static const struct file_operations tracing_mark_fops = {
7242 .open = tracing_open_generic_tr,
7243 .write = tracing_mark_write,
7244 .llseek = generic_file_llseek,
7245 .release = tracing_release_generic_tr,
7248 static const struct file_operations tracing_mark_raw_fops = {
7249 .open = tracing_open_generic_tr,
7250 .write = tracing_mark_raw_write,
7251 .llseek = generic_file_llseek,
7252 .release = tracing_release_generic_tr,
7255 static const struct file_operations trace_clock_fops = {
7256 .open = tracing_clock_open,
7258 .llseek = seq_lseek,
7259 .release = tracing_single_release_tr,
7260 .write = tracing_clock_write,
7263 static const struct file_operations trace_time_stamp_mode_fops = {
7264 .open = tracing_time_stamp_mode_open,
7266 .llseek = seq_lseek,
7267 .release = tracing_single_release_tr,
7270 #ifdef CONFIG_TRACER_SNAPSHOT
7271 static const struct file_operations snapshot_fops = {
7272 .open = tracing_snapshot_open,
7274 .write = tracing_snapshot_write,
7275 .llseek = tracing_lseek,
7276 .release = tracing_snapshot_release,
7279 static const struct file_operations snapshot_raw_fops = {
7280 .open = snapshot_raw_open,
7281 .read = tracing_buffers_read,
7282 .release = tracing_buffers_release,
7283 .splice_read = tracing_buffers_splice_read,
7284 .llseek = no_llseek,
7287 #endif /* CONFIG_TRACER_SNAPSHOT */
7289 #define TRACING_LOG_ERRS_MAX 8
7290 #define TRACING_LOG_LOC_MAX 128
7292 #define CMD_PREFIX " Command: "
7295 const char **errs; /* ptr to loc-specific array of err strings */
7296 u8 type; /* index into errs -> specific err string */
7297 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7301 struct tracing_log_err {
7302 struct list_head list;
7303 struct err_info info;
7304 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7305 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7308 static DEFINE_MUTEX(tracing_err_log_lock);
7310 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7312 struct tracing_log_err *err;
7314 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7315 err = kzalloc(sizeof(*err), GFP_KERNEL);
7317 err = ERR_PTR(-ENOMEM);
7318 tr->n_err_log_entries++;
7323 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7324 list_del(&err->list);
7330 * err_pos - find the position of a string within a command for error careting
7331 * @cmd: The tracing command that caused the error
7332 * @str: The string to position the caret at within @cmd
7334 * Finds the position of the first occurrence of @str within @cmd. The
7335 * return value can be passed to tracing_log_err() for caret placement
7338 * Returns the index within @cmd of the first occurrence of @str or 0
7339 * if @str was not found.
7341 unsigned int err_pos(char *cmd, const char *str)
7345 if (WARN_ON(!strlen(cmd)))
7348 found = strstr(cmd, str);
7356 * tracing_log_err - write an error to the tracing error log
7357 * @tr: The associated trace array for the error (NULL for top level array)
7358 * @loc: A string describing where the error occurred
7359 * @cmd: The tracing command that caused the error
7360 * @errs: The array of loc-specific static error strings
7361 * @type: The index into errs[], which produces the specific static err string
7362 * @pos: The position the caret should be placed in the cmd
7364 * Writes an error into tracing/error_log of the form:
7366 * <loc>: error: <text>
7370 * tracing/error_log is a small log file containing the last
7371 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7372 * unless there has been a tracing error, and the error log can be
7373 * cleared and have its memory freed by writing the empty string in
7374 * truncation mode to it, i.e. echo > tracing/error_log.
7376 * NOTE: the @errs array along with the @type param are used to
7377 * produce a static error string - this string is not copied and saved
7378 * when the error is logged - only a pointer to it is saved. See
7379 * existing callers for examples of how static strings are typically
7380 * defined for use with tracing_log_err().
7382 void tracing_log_err(struct trace_array *tr,
7383 const char *loc, const char *cmd,
7384 const char **errs, u8 type, u8 pos)
7386 struct tracing_log_err *err;
7391 mutex_lock(&tracing_err_log_lock);
7392 err = get_tracing_log_err(tr);
7393 if (PTR_ERR(err) == -ENOMEM) {
7394 mutex_unlock(&tracing_err_log_lock);
7398 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7399 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7401 err->info.errs = errs;
7402 err->info.type = type;
7403 err->info.pos = pos;
7404 err->info.ts = local_clock();
7406 list_add_tail(&err->list, &tr->err_log);
7407 mutex_unlock(&tracing_err_log_lock);
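/*
 * A minimal usage sketch of err_pos()/tracing_log_err(), modeled on how
 * existing callers (such as the hist trigger code) use them. The names
 * my_cmd_errs and MY_ERR_BAD_FIELD are hypothetical and only illustrate
 * the static-error-string convention described above:
 *
 *	static const char *my_cmd_errs[] = {
 *		"Field not found",
 *		"Too many arguments",
 *	};
 *	enum { MY_ERR_BAD_FIELD, MY_ERR_TOO_MANY };
 *
 *	// cmd is the command the user wrote, "bad_field" the bad token
 *	tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs,
 *			MY_ERR_BAD_FIELD, err_pos(cmd, "bad_field"));
 *
 * The entry then appears in tracing/error_log roughly as:
 *
 *	[  123.456789] my_cmd: error: Field not found
 *	  Command: keys=bad_field
 *	                ^
 */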
7410 static void clear_tracing_err_log(struct trace_array *tr)
7412 struct tracing_log_err *err, *next;
7414 mutex_lock(&tracing_err_log_lock);
7415 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7416 list_del(&err->list);
7420 tr->n_err_log_entries = 0;
7421 mutex_unlock(&tracing_err_log_lock);
7424 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7426 struct trace_array *tr = m->private;
7428 mutex_lock(&tracing_err_log_lock);
7430 return seq_list_start(&tr->err_log, *pos);
7433 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7435 struct trace_array *tr = m->private;
7437 return seq_list_next(v, &tr->err_log, pos);
7440 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7442 mutex_unlock(&tracing_err_log_lock);
7445 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7449 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7451 for (i = 0; i < pos; i++)
7456 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7458 struct tracing_log_err *err = v;
7461 const char *err_text = err->info.errs[err->info.type];
7462 u64 sec = err->info.ts;
7465 nsec = do_div(sec, NSEC_PER_SEC);
7466 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7467 err->loc, err_text);
7468 seq_printf(m, "%s", err->cmd);
7469 tracing_err_log_show_pos(m, err->info.pos);
7475 static const struct seq_operations tracing_err_log_seq_ops = {
7476 .start = tracing_err_log_seq_start,
7477 .next = tracing_err_log_seq_next,
7478 .stop = tracing_err_log_seq_stop,
7479 .show = tracing_err_log_seq_show
7482 static int tracing_err_log_open(struct inode *inode, struct file *file)
7484 struct trace_array *tr = inode->i_private;
7487 ret = tracing_check_open_get_tr(tr);
7491 /* If this file was opened for write, then erase contents */
7492 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7493 clear_tracing_err_log(tr);
7495 if (file->f_mode & FMODE_READ) {
7496 ret = seq_open(file, &tracing_err_log_seq_ops);
7498 struct seq_file *m = file->private_data;
7501 trace_array_put(tr);
7507 static ssize_t tracing_err_log_write(struct file *file,
7508 const char __user *buffer,
7509 size_t count, loff_t *ppos)
7514 static int tracing_err_log_release(struct inode *inode, struct file *file)
7516 struct trace_array *tr = inode->i_private;
7518 trace_array_put(tr);
7520 if (file->f_mode & FMODE_READ)
7521 seq_release(inode, file);
7526 static const struct file_operations tracing_err_log_fops = {
7527 .open = tracing_err_log_open,
7528 .write = tracing_err_log_write,
7530 .llseek = seq_lseek,
7531 .release = tracing_err_log_release,
7534 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7536 struct trace_array *tr = inode->i_private;
7537 struct ftrace_buffer_info *info;
7540 ret = tracing_check_open_get_tr(tr);
7544 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7546 trace_array_put(tr);
7550 mutex_lock(&trace_types_lock);
7553 info->iter.cpu_file = tracing_get_cpu(inode);
7554 info->iter.trace = tr->current_trace;
7555 info->iter.array_buffer = &tr->array_buffer;
7557 /* Force reading ring buffer for first read */
7558 info->read = (unsigned int)-1;
7560 filp->private_data = info;
7564 mutex_unlock(&trace_types_lock);
7566 ret = nonseekable_open(inode, filp);
7568 trace_array_put(tr);
7574 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7576 struct ftrace_buffer_info *info = filp->private_data;
7577 struct trace_iterator *iter = &info->iter;
7579 return trace_poll(iter, filp, poll_table);
7583 tracing_buffers_read(struct file *filp, char __user *ubuf,
7584 size_t count, loff_t *ppos)
7586 struct ftrace_buffer_info *info = filp->private_data;
7587 struct trace_iterator *iter = &info->iter;
7594 #ifdef CONFIG_TRACER_MAX_TRACE
7595 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7600 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7602 if (IS_ERR(info->spare)) {
7603 ret = PTR_ERR(info->spare);
7606 info->spare_cpu = iter->cpu_file;
7612 /* Do we have previous read data to read? */
7613 if (info->read < PAGE_SIZE)
7617 trace_access_lock(iter->cpu_file);
7618 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7622 trace_access_unlock(iter->cpu_file);
7625 if (trace_empty(iter)) {
7626 if ((filp->f_flags & O_NONBLOCK))
7629 ret = wait_on_pipe(iter, 0);
7640 size = PAGE_SIZE - info->read;
7644 ret = copy_to_user(ubuf, info->spare + info->read, size);
7656 static int tracing_buffers_release(struct inode *inode, struct file *file)
7658 struct ftrace_buffer_info *info = file->private_data;
7659 struct trace_iterator *iter = &info->iter;
7661 mutex_lock(&trace_types_lock);
7663 iter->tr->trace_ref--;
7665 __trace_array_put(iter->tr);
7668 ring_buffer_free_read_page(iter->array_buffer->buffer,
7669 info->spare_cpu, info->spare);
7672 mutex_unlock(&trace_types_lock);
7678 struct trace_buffer *buffer;
7681 refcount_t refcount;
7684 static void buffer_ref_release(struct buffer_ref *ref)
7686 if (!refcount_dec_and_test(&ref->refcount))
7688 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7692 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7693 struct pipe_buffer *buf)
7695 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7697 buffer_ref_release(ref);
7701 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7702 struct pipe_buffer *buf)
7704 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7706 if (refcount_read(&ref->refcount) > INT_MAX/2)
7709 refcount_inc(&ref->refcount);
7713 /* Pipe buffer operations for a buffer. */
7714 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7715 .release = buffer_pipe_buf_release,
7716 .get = buffer_pipe_buf_get,
7720 * Callback from splice_to_pipe(), if we need to release some pages
7721 * at the end of the spd in case we errored out while filling the pipe.
7723 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7725 struct buffer_ref *ref =
7726 (struct buffer_ref *)spd->partial[i].private;
7728 buffer_ref_release(ref);
7729 spd->partial[i].private = 0;
7733 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7734 struct pipe_inode_info *pipe, size_t len,
7737 struct ftrace_buffer_info *info = file->private_data;
7738 struct trace_iterator *iter = &info->iter;
7739 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7740 struct page *pages_def[PIPE_DEF_BUFFERS];
7741 struct splice_pipe_desc spd = {
7743 .partial = partial_def,
7744 .nr_pages_max = PIPE_DEF_BUFFERS,
7745 .ops = &buffer_pipe_buf_ops,
7746 .spd_release = buffer_spd_release,
7748 struct buffer_ref *ref;
7752 #ifdef CONFIG_TRACER_MAX_TRACE
7753 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7757 if (*ppos & (PAGE_SIZE - 1))
7760 if (len & (PAGE_SIZE - 1)) {
7761 if (len < PAGE_SIZE)
7766 if (splice_grow_spd(pipe, &spd))
7770 trace_access_lock(iter->cpu_file);
7771 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7773 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7777 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7783 refcount_set(&ref->refcount, 1);
7784 ref->buffer = iter->array_buffer->buffer;
7785 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7786 if (IS_ERR(ref->page)) {
7787 ret = PTR_ERR(ref->page);
7792 ref->cpu = iter->cpu_file;
7794 r = ring_buffer_read_page(ref->buffer, &ref->page,
7795 len, iter->cpu_file, 1);
7797 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7803 page = virt_to_page(ref->page);
7805 spd.pages[i] = page;
7806 spd.partial[i].len = PAGE_SIZE;
7807 spd.partial[i].offset = 0;
7808 spd.partial[i].private = (unsigned long)ref;
7812 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7815 trace_access_unlock(iter->cpu_file);
7818 /* did we read anything? */
7819 if (!spd.nr_pages) {
7824 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7827 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7834 ret = splice_to_pipe(pipe, &spd);
7836 splice_shrink_spd(&spd);
7841 static const struct file_operations tracing_buffers_fops = {
7842 .open = tracing_buffers_open,
7843 .read = tracing_buffers_read,
7844 .poll = tracing_buffers_poll,
7845 .release = tracing_buffers_release,
7846 .splice_read = tracing_buffers_splice_read,
7847 .llseek = no_llseek,
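/*
 * A hedged user-space sketch of consuming trace_pipe_raw via splice(),
 * which is what the splice_read callback above exists for. Error handling
 * is omitted and the path assumes the usual tracefs mount point:
 *
 *	int tp = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	for (;;) {
 *		// move whole ring-buffer pages without copying through
 *		// user space: raw file -> pipe -> output file
 *		ssize_t n = splice(tp, NULL, pfd[1], NULL, 4096, 0);
 *		if (n <= 0)
 *			break;
 *		splice(pfd[0], NULL, out, NULL, n, 0);
 *	}
 */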
7851 tracing_stats_read(struct file *filp, char __user *ubuf,
7852 size_t count, loff_t *ppos)
7854 struct inode *inode = file_inode(filp);
7855 struct trace_array *tr = inode->i_private;
7856 struct array_buffer *trace_buf = &tr->array_buffer;
7857 int cpu = tracing_get_cpu(inode);
7858 struct trace_seq *s;
7860 unsigned long long t;
7861 unsigned long usec_rem;
7863 s = kmalloc(sizeof(*s), GFP_KERNEL);
7869 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7870 trace_seq_printf(s, "entries: %ld\n", cnt);
7872 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7873 trace_seq_printf(s, "overrun: %ld\n", cnt);
7875 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7876 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7878 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7879 trace_seq_printf(s, "bytes: %ld\n", cnt);
7881 if (trace_clocks[tr->clock_id].in_ns) {
7882 /* local or global for trace_clock */
7883 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7884 usec_rem = do_div(t, USEC_PER_SEC);
7885 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7888 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7889 usec_rem = do_div(t, USEC_PER_SEC);
7890 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7892 /* counter or tsc mode for trace_clock */
7893 trace_seq_printf(s, "oldest event ts: %llu\n",
7894 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7896 trace_seq_printf(s, "now ts: %llu\n",
7897 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7900 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7901 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7903 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7904 trace_seq_printf(s, "read events: %ld\n", cnt);
7906 count = simple_read_from_buffer(ubuf, count, ppos,
7907 s->buffer, trace_seq_used(s));
7914 static const struct file_operations tracing_stats_fops = {
7915 .open = tracing_open_generic_tr,
7916 .read = tracing_stats_read,
7917 .llseek = generic_file_llseek,
7918 .release = tracing_release_generic_tr,
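/*
 * With the trace_seq_printf() formats used above, "cat per_cpu/cpu0/stats"
 * produces output along these lines (the values here are made up):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 7492
 *	oldest event ts:  2301.982041
 *	now ts:  2323.100843
 *	dropped events: 0
 *	read events: 129
 */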
7921 #ifdef CONFIG_DYNAMIC_FTRACE
7924 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7925 size_t cnt, loff_t *ppos)
7931 /* 256 should be plenty to hold the amount needed */
7932 buf = kmalloc(256, GFP_KERNEL);
7936 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7937 ftrace_update_tot_cnt,
7938 ftrace_number_of_pages,
7939 ftrace_number_of_groups);
7941 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7946 static const struct file_operations tracing_dyn_info_fops = {
7947 .open = tracing_open_generic,
7948 .read = tracing_read_dyn_info,
7949 .llseek = generic_file_llseek,
7951 #endif /* CONFIG_DYNAMIC_FTRACE */
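/*
 * With the scnprintf() format above, "cat dyn_ftrace_total_info" yields a
 * single line such as (numbers are illustrative only):
 *
 *	45021 pages:312 groups: 26
 */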
7953 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7955 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7956 struct trace_array *tr, struct ftrace_probe_ops *ops,
7959 tracing_snapshot_instance(tr);
7963 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7964 struct trace_array *tr, struct ftrace_probe_ops *ops,
7967 struct ftrace_func_mapper *mapper = data;
7971 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7981 tracing_snapshot_instance(tr);
7985 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7986 struct ftrace_probe_ops *ops, void *data)
7988 struct ftrace_func_mapper *mapper = data;
7991 seq_printf(m, "%ps:", (void *)ip);
7993 seq_puts(m, "snapshot");
7996 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7999 seq_printf(m, ":count=%ld\n", *count);
8001 seq_puts(m, ":unlimited\n");
8007 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8008 unsigned long ip, void *init_data, void **data)
8010 struct ftrace_func_mapper *mapper = *data;
8013 mapper = allocate_ftrace_func_mapper();
8019 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8023 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8024 unsigned long ip, void *data)
8026 struct ftrace_func_mapper *mapper = data;
8031 free_ftrace_func_mapper(mapper, NULL);
8035 ftrace_func_mapper_remove_ip(mapper, ip);
8038 static struct ftrace_probe_ops snapshot_probe_ops = {
8039 .func = ftrace_snapshot,
8040 .print = ftrace_snapshot_print,
8043 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8044 .func = ftrace_count_snapshot,
8045 .print = ftrace_snapshot_print,
8046 .init = ftrace_snapshot_init,
8047 .free = ftrace_snapshot_free,
8051 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8052 char *glob, char *cmd, char *param, int enable)
8054 struct ftrace_probe_ops *ops;
8055 void *count = (void *)-1;
8062 /* hash funcs only work with set_ftrace_filter */
8066 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8069 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8074 number = strsep(&param, ":");
8076 if (!strlen(number))
8080 * We use the callback data field (which is a pointer)
8081 * as our counter.
8083 ret = kstrtoul(number, 0, (unsigned long *)&count);
8088 ret = tracing_alloc_snapshot_instance(tr);
8092 ret = register_ftrace_function_probe(glob, tr, ops, count);
8095 return ret < 0 ? ret : 0;
8098 static struct ftrace_func_command ftrace_snapshot_cmd = {
8100 .func = ftrace_trace_snapshot_callback,
8103 static __init int register_snapshot_cmd(void)
8105 return register_ftrace_command(&ftrace_snapshot_cmd);
8108 static inline __init int register_snapshot_cmd(void) { return 0; }
8109 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
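/*
 * An illustrative use of the "snapshot" function command registered above,
 * written to set_ftrace_filter; the function name is only an example:
 *
 *	# take a snapshot every time do_sys_open() is hit
 *	echo 'do_sys_open:snapshot' > set_ftrace_filter
 *
 *	# only take the first 5 snapshots (":5" becomes the probe's count)
 *	echo 'do_sys_open:snapshot:5' > set_ftrace_filter
 *
 *	# remove the probe again
 *	echo '!do_sys_open:snapshot' > set_ftrace_filter
 */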
8111 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8113 if (WARN_ON(!tr->dir))
8114 return ERR_PTR(-ENODEV);
8116 /* Top directory uses NULL as the parent */
8117 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8120 /* All sub buffers have a descriptor */
8124 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8126 struct dentry *d_tracer;
8129 return tr->percpu_dir;
8131 d_tracer = tracing_get_dentry(tr);
8132 if (IS_ERR(d_tracer))
8135 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8137 MEM_FAIL(!tr->percpu_dir,
8138 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8140 return tr->percpu_dir;
8143 static struct dentry *
8144 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8145 void *data, long cpu, const struct file_operations *fops)
8147 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8149 if (ret) /* See tracing_get_cpu() */
8150 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8155 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8157 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8158 struct dentry *d_cpu;
8159 char cpu_dir[30]; /* 30 characters should be more than enough */
8164 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8165 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8167 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8171 /* per cpu trace_pipe */
8172 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8173 tr, cpu, &tracing_pipe_fops);
8176 trace_create_cpu_file("trace", 0644, d_cpu,
8177 tr, cpu, &tracing_fops);
8179 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8180 tr, cpu, &tracing_buffers_fops);
8182 trace_create_cpu_file("stats", 0444, d_cpu,
8183 tr, cpu, &tracing_stats_fops);
8185 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8186 tr, cpu, &tracing_entries_fops);
8188 #ifdef CONFIG_TRACER_SNAPSHOT
8189 trace_create_cpu_file("snapshot", 0644, d_cpu,
8190 tr, cpu, &snapshot_fops);
8192 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8193 tr, cpu, &snapshot_raw_fops);
8197 #ifdef CONFIG_FTRACE_SELFTEST
8198 /* Let selftest have access to static functions in this file */
8199 #include "trace_selftest.c"
8203 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8206 struct trace_option_dentry *topt = filp->private_data;
8209 if (topt->flags->val & topt->opt->bit)
8214 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8218 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8221 struct trace_option_dentry *topt = filp->private_data;
8225 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8229 if (val != 0 && val != 1)
8232 if (!!(topt->flags->val & topt->opt->bit) != val) {
8233 mutex_lock(&trace_types_lock);
8234 ret = __set_tracer_option(topt->tr, topt->flags,
8236 mutex_unlock(&trace_types_lock);
8247 static const struct file_operations trace_options_fops = {
8248 .open = tracing_open_generic,
8249 .read = trace_options_read,
8250 .write = trace_options_write,
8251 .llseek = generic_file_llseek,
8255 * In order to pass in both the trace_array descriptor as well as the index
8256 * to the flag that the trace option file represents, the trace_array
8257 * has a character array of trace_flags_index[], which holds the index
8258 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8259 * The address of this character array is passed to the flag option file
8260 * read/write callbacks.
8262 * In order to extract both the index and the trace_array descriptor,
8263 * get_tr_index() uses the following algorithm.
8265 *   idx = *ptr;
8267 * As the pointer itself contains the address of the index (remember
8268 * index[1] == 1).
8270 * Then, to get the trace_array descriptor, we subtract that index
8271 * from the ptr, which takes us back to the start of the index array:
8273 * ptr - idx == &index[0]
8275 * Then a simple container_of() from that pointer gets us to the
8276 * trace_array descriptor.
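/*
 * A short worked example of the scheme above: if the option file for bit 3
 * was created with data == &tr->trace_flags_index[3], then
 * trace_flags_index[3] == 3, so:
 *
 *	idx = *(unsigned char *)data;             // idx == 3
 *	data - idx == &tr->trace_flags_index[0];  // start of the array
 *	container_of(data - idx, struct trace_array, trace_flags_index) == tr;
 */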
8278 static void get_tr_index(void *data, struct trace_array **ptr,
8279 unsigned int *pindex)
8281 *pindex = *(unsigned char *)data;
8283 *ptr = container_of(data - *pindex, struct trace_array,
8284 trace_flags_index);
8288 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8291 void *tr_index = filp->private_data;
8292 struct trace_array *tr;
8296 get_tr_index(tr_index, &tr, &index);
8298 if (tr->trace_flags & (1 << index))
8303 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8307 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8310 void *tr_index = filp->private_data;
8311 struct trace_array *tr;
8316 get_tr_index(tr_index, &tr, &index);
8318 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8322 if (val != 0 && val != 1)
8325 mutex_lock(&event_mutex);
8326 mutex_lock(&trace_types_lock);
8327 ret = set_tracer_flag(tr, 1 << index, val);
8328 mutex_unlock(&trace_types_lock);
8329 mutex_unlock(&event_mutex);
8339 static const struct file_operations trace_options_core_fops = {
8340 .open = tracing_open_generic,
8341 .read = trace_options_core_read,
8342 .write = trace_options_core_write,
8343 .llseek = generic_file_llseek,
8346 struct dentry *trace_create_file(const char *name,
8348 struct dentry *parent,
8350 const struct file_operations *fops)
8354 ret = tracefs_create_file(name, mode, parent, data, fops);
8356 pr_warn("Could not create tracefs '%s' entry\n", name);
8362 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8364 struct dentry *d_tracer;
8369 d_tracer = tracing_get_dentry(tr);
8370 if (IS_ERR(d_tracer))
8373 tr->options = tracefs_create_dir("options", d_tracer);
8375 pr_warn("Could not create tracefs directory 'options'\n");
8383 create_trace_option_file(struct trace_array *tr,
8384 struct trace_option_dentry *topt,
8385 struct tracer_flags *flags,
8386 struct tracer_opt *opt)
8388 struct dentry *t_options;
8390 t_options = trace_options_init_dentry(tr);
8394 topt->flags = flags;
8398 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8399 &trace_options_fops);
8404 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8406 struct trace_option_dentry *topts;
8407 struct trace_options *tr_topts;
8408 struct tracer_flags *flags;
8409 struct tracer_opt *opts;
8416 flags = tracer->flags;
8418 if (!flags || !flags->opts)
8422 * If this is an instance, only create flags for tracers
8423 * the instance may have.
8425 if (!trace_ok_for_array(tracer, tr))
8428 for (i = 0; i < tr->nr_topts; i++) {
8429 /* Make sure there are no duplicate flags. */
8430 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8436 for (cnt = 0; opts[cnt].name; cnt++)
8439 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8443 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8450 tr->topts = tr_topts;
8451 tr->topts[tr->nr_topts].tracer = tracer;
8452 tr->topts[tr->nr_topts].topts = topts;
8455 for (cnt = 0; opts[cnt].name; cnt++) {
8456 create_trace_option_file(tr, &topts[cnt], flags,
8458 MEM_FAIL(topts[cnt].entry == NULL,
8459 "Failed to create trace option: %s",
8464 static struct dentry *
8465 create_trace_option_core_file(struct trace_array *tr,
8466 const char *option, long index)
8468 struct dentry *t_options;
8470 t_options = trace_options_init_dentry(tr);
8474 return trace_create_file(option, 0644, t_options,
8475 (void *)&tr->trace_flags_index[index],
8476 &trace_options_core_fops);
8479 static void create_trace_options_dir(struct trace_array *tr)
8481 struct dentry *t_options;
8482 bool top_level = tr == &global_trace;
8485 t_options = trace_options_init_dentry(tr);
8489 for (i = 0; trace_options[i]; i++) {
8491 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8492 create_trace_option_core_file(tr, trace_options[i], i);
8497 rb_simple_read(struct file *filp, char __user *ubuf,
8498 size_t cnt, loff_t *ppos)
8500 struct trace_array *tr = filp->private_data;
8504 r = tracer_tracing_is_on(tr);
8505 r = sprintf(buf, "%d\n", r);
8507 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8511 rb_simple_write(struct file *filp, const char __user *ubuf,
8512 size_t cnt, loff_t *ppos)
8514 struct trace_array *tr = filp->private_data;
8515 struct trace_buffer *buffer = tr->array_buffer.buffer;
8519 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8524 mutex_lock(&trace_types_lock);
8525 if (!!val == tracer_tracing_is_on(tr)) {
8526 val = 0; /* do nothing */
8528 tracer_tracing_on(tr);
8529 if (tr->current_trace->start)
8530 tr->current_trace->start(tr);
8532 tracer_tracing_off(tr);
8533 if (tr->current_trace->stop)
8534 tr->current_trace->stop(tr);
8536 mutex_unlock(&trace_types_lock);
8544 static const struct file_operations rb_simple_fops = {
8545 .open = tracing_open_generic_tr,
8546 .read = rb_simple_read,
8547 .write = rb_simple_write,
8548 .release = tracing_release_generic_tr,
8549 .llseek = default_llseek,
8553 buffer_percent_read(struct file *filp, char __user *ubuf,
8554 size_t cnt, loff_t *ppos)
8556 struct trace_array *tr = filp->private_data;
8560 r = tr->buffer_percent;
8561 r = sprintf(buf, "%d\n", r);
8563 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8567 buffer_percent_write(struct file *filp, const char __user *ubuf,
8568 size_t cnt, loff_t *ppos)
8570 struct trace_array *tr = filp->private_data;
8574 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8584 tr->buffer_percent = val;
8591 static const struct file_operations buffer_percent_fops = {
8592 .open = tracing_open_generic_tr,
8593 .read = buffer_percent_read,
8594 .write = buffer_percent_write,
8595 .release = tracing_release_generic_tr,
8596 .llseek = default_llseek,
8599 static struct dentry *trace_instance_dir;
8602 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8605 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8607 enum ring_buffer_flags rb_flags;
8609 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8613 buf->buffer = ring_buffer_alloc(size, rb_flags);
8617 buf->data = alloc_percpu(struct trace_array_cpu);
8619 ring_buffer_free(buf->buffer);
8624 /* Allocate the first page for all buffers */
8625 set_buffer_entries(&tr->array_buffer,
8626 ring_buffer_size(tr->array_buffer.buffer, 0));
8631 static int allocate_trace_buffers(struct trace_array *tr, int size)
8635 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8639 #ifdef CONFIG_TRACER_MAX_TRACE
8640 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8641 allocate_snapshot ? size : 1);
8642 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8643 ring_buffer_free(tr->array_buffer.buffer);
8644 tr->array_buffer.buffer = NULL;
8645 free_percpu(tr->array_buffer.data);
8646 tr->array_buffer.data = NULL;
8649 tr->allocated_snapshot = allocate_snapshot;
8652 * Only the top level trace array gets its snapshot allocated
8653 * from the kernel command line.
8655 allocate_snapshot = false;
8661 static void free_trace_buffer(struct array_buffer *buf)
8664 ring_buffer_free(buf->buffer);
8666 free_percpu(buf->data);
8671 static void free_trace_buffers(struct trace_array *tr)
8676 free_trace_buffer(&tr->array_buffer);
8678 #ifdef CONFIG_TRACER_MAX_TRACE
8679 free_trace_buffer(&tr->max_buffer);
8683 static void init_trace_flags_index(struct trace_array *tr)
8687 /* Used by the trace options files */
8688 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8689 tr->trace_flags_index[i] = i;
8692 static void __update_tracer_options(struct trace_array *tr)
8696 for (t = trace_types; t; t = t->next)
8697 add_tracer_options(tr, t);
8700 static void update_tracer_options(struct trace_array *tr)
8702 mutex_lock(&trace_types_lock);
8703 __update_tracer_options(tr);
8704 mutex_unlock(&trace_types_lock);
8707 /* Must have trace_types_lock held */
8708 struct trace_array *trace_array_find(const char *instance)
8710 struct trace_array *tr, *found = NULL;
8712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8713 if (tr->name && strcmp(tr->name, instance) == 0) {
8722 struct trace_array *trace_array_find_get(const char *instance)
8724 struct trace_array *tr;
8726 mutex_lock(&trace_types_lock);
8727 tr = trace_array_find(instance);
8730 mutex_unlock(&trace_types_lock);
8735 static int trace_array_create_dir(struct trace_array *tr)
8739 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8743 ret = event_trace_add_tracer(tr->dir, tr);
8745 tracefs_remove(tr->dir);
8747 init_tracer_tracefs(tr, tr->dir);
8748 __update_tracer_options(tr);
8753 static struct trace_array *trace_array_create(const char *name)
8755 struct trace_array *tr;
8759 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8761 return ERR_PTR(ret);
8763 tr->name = kstrdup(name, GFP_KERNEL);
8767 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8770 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8772 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8774 raw_spin_lock_init(&tr->start_lock);
8776 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8778 tr->current_trace = &nop_trace;
8780 INIT_LIST_HEAD(&tr->systems);
8781 INIT_LIST_HEAD(&tr->events);
8782 INIT_LIST_HEAD(&tr->hist_vars);
8783 INIT_LIST_HEAD(&tr->err_log);
8785 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8788 if (ftrace_allocate_ftrace_ops(tr) < 0)
8791 ftrace_init_trace_array(tr);
8793 init_trace_flags_index(tr);
8795 if (trace_instance_dir) {
8796 ret = trace_array_create_dir(tr);
8800 __trace_early_add_events(tr);
8802 list_add(&tr->list, &ftrace_trace_arrays);
8809 ftrace_free_ftrace_ops(tr);
8810 free_trace_buffers(tr);
8811 free_cpumask_var(tr->tracing_cpumask);
8815 return ERR_PTR(ret);
8818 static int instance_mkdir(const char *name)
8820 struct trace_array *tr;
8823 mutex_lock(&event_mutex);
8824 mutex_lock(&trace_types_lock);
8827 if (trace_array_find(name))
8830 tr = trace_array_create(name);
8832 ret = PTR_ERR_OR_ZERO(tr);
8835 mutex_unlock(&trace_types_lock);
8836 mutex_unlock(&event_mutex);
8841 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8842 * @name: The name of the trace array to be looked up/created.
8844 * Returns a pointer to the trace array with the given name, or
8845 * NULL if it cannot be created.
8847 * NOTE: This function increments the reference counter associated with the
8848 * trace array returned. This makes sure it cannot be freed while in use.
8849 * Use trace_array_put() once the trace array is no longer needed.
8850 * If the trace_array is to be freed, trace_array_destroy() needs to
8851 * be called after the trace_array_put(), or simply let user space delete
8852 * it from the tracefs instances directory. But until the
8853 * trace_array_put() is called, user space cannot delete it.
8856 struct trace_array *trace_array_get_by_name(const char *name)
8858 struct trace_array *tr;
8860 mutex_lock(&event_mutex);
8861 mutex_lock(&trace_types_lock);
8863 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8864 if (tr->name && strcmp(tr->name, name) == 0)
8868 tr = trace_array_create(name);
8876 mutex_unlock(&trace_types_lock);
8877 mutex_unlock(&event_mutex);
8880 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
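/*
 * A hedged sketch of how a module might use the API documented above;
 * "my_instance" is an arbitrary example name:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *
 *	// ... use tr, e.g. with trace_array_printk(tr, ...) ...
 *
 *	trace_array_put(tr);
 *	// optionally remove the instance once it is no longer needed:
 *	trace_array_destroy(tr);
 */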
8882 static int __remove_instance(struct trace_array *tr)
8886 /* Reference counter for a newly created trace array = 1. */
8887 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8890 list_del(&tr->list);
8892 /* Disable all the flags that were enabled coming in */
8893 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8894 if ((1 << i) & ZEROED_TRACE_FLAGS)
8895 set_tracer_flag(tr, 1 << i, 0);
8898 tracing_set_nop(tr);
8899 clear_ftrace_function_probes(tr);
8900 event_trace_del_tracer(tr);
8901 ftrace_clear_pids(tr);
8902 ftrace_destroy_function_files(tr);
8903 tracefs_remove(tr->dir);
8904 free_trace_buffers(tr);
8906 for (i = 0; i < tr->nr_topts; i++) {
8907 kfree(tr->topts[i].topts);
8911 free_cpumask_var(tr->tracing_cpumask);
8918 int trace_array_destroy(struct trace_array *this_tr)
8920 struct trace_array *tr;
8926 mutex_lock(&event_mutex);
8927 mutex_lock(&trace_types_lock);
8931 /* Making sure trace array exists before destroying it. */
8932 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8933 if (tr == this_tr) {
8934 ret = __remove_instance(tr);
8939 mutex_unlock(&trace_types_lock);
8940 mutex_unlock(&event_mutex);
8944 EXPORT_SYMBOL_GPL(trace_array_destroy);
8946 static int instance_rmdir(const char *name)
8948 struct trace_array *tr;
8951 mutex_lock(&event_mutex);
8952 mutex_lock(&trace_types_lock);
8955 tr = trace_array_find(name);
8957 ret = __remove_instance(tr);
8959 mutex_unlock(&trace_types_lock);
8960 mutex_unlock(&event_mutex);
8965 static __init void create_trace_instances(struct dentry *d_tracer)
8967 struct trace_array *tr;
8969 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8972 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8975 mutex_lock(&event_mutex);
8976 mutex_lock(&trace_types_lock);
8978 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8981 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8982 "Failed to create instance directory\n"))
8986 mutex_unlock(&trace_types_lock);
8987 mutex_unlock(&event_mutex);
8991 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8993 struct trace_event_file *file;
8996 trace_create_file("available_tracers", 0444, d_tracer,
8997 tr, &show_traces_fops);
8999 trace_create_file("current_tracer", 0644, d_tracer,
9000 tr, &set_tracer_fops);
9002 trace_create_file("tracing_cpumask", 0644, d_tracer,
9003 tr, &tracing_cpumask_fops);
9005 trace_create_file("trace_options", 0644, d_tracer,
9006 tr, &tracing_iter_fops);
9008 trace_create_file("trace", 0644, d_tracer,
9011 trace_create_file("trace_pipe", 0444, d_tracer,
9012 tr, &tracing_pipe_fops);
9014 trace_create_file("buffer_size_kb", 0644, d_tracer,
9015 tr, &tracing_entries_fops);
9017 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9018 tr, &tracing_total_entries_fops);
9020 trace_create_file("free_buffer", 0200, d_tracer,
9021 tr, &tracing_free_buffer_fops);
9023 trace_create_file("trace_marker", 0220, d_tracer,
9024 tr, &tracing_mark_fops);
9026 file = __find_event_file(tr, "ftrace", "print");
9027 if (file && file->dir)
9028 trace_create_file("trigger", 0644, file->dir, file,
9029 &event_trigger_fops);
9030 tr->trace_marker_file = file;
9032 trace_create_file("trace_marker_raw", 0220, d_tracer,
9033 tr, &tracing_mark_raw_fops);
9035 trace_create_file("trace_clock", 0644, d_tracer, tr,
9038 trace_create_file("tracing_on", 0644, d_tracer,
9039 tr, &rb_simple_fops);
9041 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9042 &trace_time_stamp_mode_fops);
9044 tr->buffer_percent = 50;
9046 trace_create_file("buffer_percent", 0444, d_tracer,
9047 tr, &buffer_percent_fops);
9049 create_trace_options_dir(tr);
9051 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9052 trace_create_maxlat_file(tr, d_tracer);
9055 if (ftrace_create_function_files(tr, d_tracer))
9056 MEM_FAIL(1, "Could not allocate function filter files");
9058 #ifdef CONFIG_TRACER_SNAPSHOT
9059 trace_create_file("snapshot", 0644, d_tracer,
9060 tr, &snapshot_fops);
9063 trace_create_file("error_log", 0644, d_tracer,
9064 tr, &tracing_err_log_fops);
9066 for_each_tracing_cpu(cpu)
9067 tracing_init_tracefs_percpu(tr, cpu);
9069 ftrace_init_tracefs(tr, d_tracer);
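/*
 * A few illustrative uses of the per-instance control files created above
 * (paths are relative to the instance's tracefs directory):
 *
 *	echo nop > current_tracer     # select a tracer
 *	echo 1 > tracing_on           # let the ring buffer record
 *	echo hello > trace_marker     # inject a marker event
 *	echo 60 > buffer_percent      # wake poll/splice readers at 60% full
 *	cat error_log                 # show recent tracing errors
 */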
9072 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9074 struct vfsmount *mnt;
9075 struct file_system_type *type;
9078 * To maintain backward compatibility for tools that mount
9079 * debugfs to get to the tracing facility, tracefs is automatically
9080 * mounted to the debugfs/tracing directory.
9082 type = get_fs_type("tracefs");
9085 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9086 put_filesystem(type);
9095 * tracing_init_dentry - initialize top level trace array
9097 * This is called when creating files or directories in the tracing
9098 * directory. It is called via fs_initcall() by any of the boot up code
9099 * and returns 0 once the top level tracing directory is set up, or a negative errno on failure.
9101 int tracing_init_dentry(void)
9103 struct trace_array *tr = &global_trace;
9105 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9106 pr_warn("Tracing disabled due to lockdown\n");
9110 /* The top level trace array uses NULL as parent */
9114 if (WARN_ON(!tracefs_initialized()))
9118 * As there may still be users that expect the tracing
9119 * files to exist in debugfs/tracing, we must automount
9120 * the tracefs file system there, so older tools still
9121 * work with the newer kernel.
9123 tr->dir = debugfs_create_automount("tracing", NULL,
9124 trace_automount, NULL);
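/*
 * With the automount above in place, both of these paths reach the same
 * tracefs files (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	ls /sys/kernel/tracing/          # native tracefs mount
 *	ls /sys/kernel/debug/tracing/    # legacy path, automounted tracefs
 */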
9129 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9130 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9132 static struct workqueue_struct *eval_map_wq __initdata;
9133 static struct work_struct eval_map_work __initdata;
9135 static void __init eval_map_work_func(struct work_struct *work)
9139 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9140 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9143 static int __init trace_eval_init(void)
9145 INIT_WORK(&eval_map_work, eval_map_work_func);
9147 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9149 pr_err("Unable to allocate eval_map_wq\n");
9151 eval_map_work_func(&eval_map_work);
9155 queue_work(eval_map_wq, &eval_map_work);
9159 static int __init trace_eval_sync(void)
9161 /* Make sure the eval map updates are finished */
9163 destroy_workqueue(eval_map_wq);
9167 late_initcall_sync(trace_eval_sync);
9170 #ifdef CONFIG_MODULES
9171 static void trace_module_add_evals(struct module *mod)
9173 if (!mod->num_trace_evals)
9177 * Modules with bad taint do not have events created, do
9178 * not bother with enums either.
9180 if (trace_module_has_bad_taint(mod))
9183 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9186 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9187 static void trace_module_remove_evals(struct module *mod)
9189 union trace_eval_map_item *map;
9190 union trace_eval_map_item **last = &trace_eval_maps;
9192 if (!mod->num_trace_evals)
9195 mutex_lock(&trace_eval_mutex);
9197 map = trace_eval_maps;
9200 if (map->head.mod == mod)
9202 map = trace_eval_jmp_to_tail(map);
9203 last = &map->tail.next;
9204 map = map->tail.next;
9209 *last = trace_eval_jmp_to_tail(map)->tail.next;
9212 mutex_unlock(&trace_eval_mutex);
9215 static inline void trace_module_remove_evals(struct module *mod) { }
9216 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9218 static int trace_module_notify(struct notifier_block *self,
9219 unsigned long val, void *data)
9221 struct module *mod = data;
9224 case MODULE_STATE_COMING:
9225 trace_module_add_evals(mod);
9227 case MODULE_STATE_GOING:
9228 trace_module_remove_evals(mod);
9235 static struct notifier_block trace_module_nb = {
9236 .notifier_call = trace_module_notify,
9239 #endif /* CONFIG_MODULES */
9241 static __init int tracer_init_tracefs(void)
9245 trace_access_lock_init();
9247 ret = tracing_init_dentry();
9253 init_tracer_tracefs(&global_trace, NULL);
9254 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9256 trace_create_file("tracing_thresh", 0644, NULL,
9257 &global_trace, &tracing_thresh_fops);
9259 trace_create_file("README", 0444, NULL,
9260 NULL, &tracing_readme_fops);
9262 trace_create_file("saved_cmdlines", 0444, NULL,
9263 NULL, &tracing_saved_cmdlines_fops);
9265 trace_create_file("saved_cmdlines_size", 0644, NULL,
9266 NULL, &tracing_saved_cmdlines_size_fops);
9268 trace_create_file("saved_tgids", 0444, NULL,
9269 NULL, &tracing_saved_tgids_fops);
9273 trace_create_eval_file(NULL);
9275 #ifdef CONFIG_MODULES
9276 register_module_notifier(&trace_module_nb);
9279 #ifdef CONFIG_DYNAMIC_FTRACE
9280 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9281 NULL, &tracing_dyn_info_fops);
9284 create_trace_instances(NULL);
9286 update_tracer_options(&global_trace);
9291 static int trace_panic_handler(struct notifier_block *this,
9292 unsigned long event, void *unused)
9294 if (ftrace_dump_on_oops)
9295 ftrace_dump(ftrace_dump_on_oops);
9299 static struct notifier_block trace_panic_notifier = {
9300 .notifier_call = trace_panic_handler,
9302 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9305 static int trace_die_handler(struct notifier_block *self,
9311 if (ftrace_dump_on_oops)
9312 ftrace_dump(ftrace_dump_on_oops);
9320 static struct notifier_block trace_die_notifier = {
9321 .notifier_call = trace_die_handler,
9326 * printk is limited to a max of 1024 bytes; we really don't need it that big.
9327 * Nothing should be printing 1000 characters anyway.
9329 #define TRACE_MAX_PRINT 1000
9332 * Define here KERN_TRACE so that we have one place to modify
9333 * it if we decide to change what log level the ftrace dump
9334 * should be printed at.
9336 #define KERN_TRACE KERN_EMERG
9339 trace_printk_seq(struct trace_seq *s)
9341 /* Probably should print a warning here. */
9342 if (s->seq.len >= TRACE_MAX_PRINT)
9343 s->seq.len = TRACE_MAX_PRINT;
9346 * More paranoid code. Although the buffer size is set to
9347 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9348 * an extra layer of protection.
9350 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9351 s->seq.len = s->seq.size - 1;
9353 /* should be NUL terminated, but we are paranoid. */
9354 s->buffer[s->seq.len] = 0;
9356 printk(KERN_TRACE "%s", s->buffer);
9361 void trace_init_global_iter(struct trace_iterator *iter)
9363 iter->tr = &global_trace;
9364 iter->trace = iter->tr->current_trace;
9365 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9366 iter->array_buffer = &global_trace.array_buffer;
9368 if (iter->trace && iter->trace->open)
9369 iter->trace->open(iter);
9371 /* Annotate start of buffers if we had overruns */
9372 if (ring_buffer_overruns(iter->array_buffer->buffer))
9373 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9375 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9376 if (trace_clocks[iter->tr->clock_id].in_ns)
9377 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9380 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9382 /* use static because iter can be a bit big for the stack */
9383 static struct trace_iterator iter;
9384 static atomic_t dump_running;
9385 struct trace_array *tr = &global_trace;
9386 unsigned int old_userobj;
9387 unsigned long flags;
9390 /* Only allow one dump user at a time. */
9391 if (atomic_inc_return(&dump_running) != 1) {
9392 atomic_dec(&dump_running);
9397 * Always turn off tracing when we dump.
9398 * We don't need to show trace output of what happens
9399 * between multiple crashes.
9401 * If the user does a sysrq-z, then they can re-enable
9402 * tracing with echo 1 > tracing_on.
9406 local_irq_save(flags);
9407 printk_nmi_direct_enter();
9409 /* Simulate the iterator */
9410 trace_init_global_iter(&iter);
9411 /* Cannot use kmalloc for iter.temp and iter.fmt */
9412 iter.temp = static_temp_buf;
9413 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9414 iter.fmt = static_fmt_buf;
9415 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9417 for_each_tracing_cpu(cpu) {
9418 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9421 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9423 /* don't look at user memory in panic mode */
9424 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9426 switch (oops_dump_mode) {
9428 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9431 iter.cpu_file = raw_smp_processor_id();
9436 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9437 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9440 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9442 /* Did function tracer already get disabled? */
9443 if (ftrace_is_dead()) {
9444 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9445 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9449 * We need to stop all tracing on all CPUS to read
9450 * the next buffer. This is a bit expensive, but is
9451 * not done often. We read everything we can,
9452 * and then release the locks again.
9455 while (!trace_empty(&iter)) {
9458 printk(KERN_TRACE "---------------------------------\n");
9462 trace_iterator_reset(&iter);
9463 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9465 if (trace_find_next_entry_inc(&iter) != NULL) {
9468 ret = print_trace_line(&iter);
9469 if (ret != TRACE_TYPE_NO_CONSUME)
9470 trace_consume(&iter);
9472 touch_nmi_watchdog();
9474 trace_printk_seq(&iter.seq);
9478 printk(KERN_TRACE " (ftrace buffer empty)\n");
9480 printk(KERN_TRACE "---------------------------------\n");
9483 tr->trace_flags |= old_userobj;
9485 for_each_tracing_cpu(cpu) {
9486 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9488 atomic_dec(&dump_running);
9489 printk_nmi_direct_exit();
9490 local_irq_restore(flags);
9492 EXPORT_SYMBOL_GPL(ftrace_dump);
9494 #define WRITE_BUFSIZE 4096
9496 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9497 size_t count, loff_t *ppos,
9498 int (*createfn)(const char *))
9500 char *kbuf, *buf, *tmp;
9505 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9509 while (done < count) {
9510 size = count - done;
9512 if (size >= WRITE_BUFSIZE)
9513 size = WRITE_BUFSIZE - 1;
9515 if (copy_from_user(kbuf, buffer + done, size)) {
9522 tmp = strchr(buf, '\n');
9525 size = tmp - buf + 1;
9528 if (done + size < count) {
9531 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9532 pr_warn("Line length is too long: Should be less than %d\n",
9540 /* Remove comments */
9541 tmp = strchr(buf, '#');
9546 ret = createfn(buf);
9551 } while (done < count);
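/*
 * A hedged sketch of how a tracefs write handler typically wires up
 * trace_parse_run_command(); "my_create_cmd" is a hypothetical callback
 * that receives one newline-terminated, '#'-comment-stripped command at a
 * time (kprobe_events and uprobe_events are wired up this way):
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		// parse one command line, return 0 or -errno
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *buffer,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_create_cmd);
 *	}
 */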
9561 __init static int tracer_alloc_buffers(void)
9567 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9568 pr_warn("Tracing disabled due to lockdown\n");
9573 * Make sure we don't accidentally add more trace options
9574 * than we have bits for.
9576 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9578 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9581 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9582 goto out_free_buffer_mask;
9584 /* Only allocate trace_printk buffers if a trace_printk exists */
9585 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9586 /* Must be called before global_trace.buffer is allocated */
9587 trace_printk_init_buffers();
9589 /* To save memory, keep the ring buffer size at its minimum */
9590 if (ring_buffer_expanded)
9591 ring_buf_size = trace_buf_size;
9595 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9596 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9598 raw_spin_lock_init(&global_trace.start_lock);
9601 * The prepare callback allocates some memory for the ring buffer. We
9602 * don't free the buffer if the CPU goes down. If we were to free
9603 * the buffer, then the user would lose any trace that was in the
9604 * buffer. The memory will be removed once the "instance" is removed.
9606 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9607 "trace/RB:preapre", trace_rb_cpu_prepare,
9610 goto out_free_cpumask;
9611 /* Used for event triggers */
9613 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9615 goto out_rm_hp_state;
9617 if (trace_create_savedcmd() < 0)
9618 goto out_free_temp_buffer;
9620 /* TODO: make the number of buffers hot pluggable with CPUS */
9621 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9622 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9623 goto out_free_savedcmd;
9626 if (global_trace.buffer_disabled)
9629 if (trace_boot_clock) {
9630 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9632 pr_warn("Trace clock %s not defined, going back to default\n",
9637 * register_tracer() might reference current_trace, so it
9638 * needs to be set before we register anything. This is
9639 * just a bootstrap of current_trace anyway.
9641 global_trace.current_trace = &nop_trace;
9643 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9645 ftrace_init_global_array_ops(&global_trace);
9647 init_trace_flags_index(&global_trace);
9649 register_tracer(&nop_trace);
9651 /* Function tracing may start here (via kernel command line) */
9652 init_function_trace();
9654 /* All seems OK, enable tracing */
9655 tracing_disabled = 0;
9657 atomic_notifier_chain_register(&panic_notifier_list,
9658 &trace_panic_notifier);
9660 register_die_notifier(&trace_die_notifier);
9662 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9664 INIT_LIST_HEAD(&global_trace.systems);
9665 INIT_LIST_HEAD(&global_trace.events);
9666 INIT_LIST_HEAD(&global_trace.hist_vars);
9667 INIT_LIST_HEAD(&global_trace.err_log);
9668 list_add(&global_trace.list, &ftrace_trace_arrays);
9670 apply_trace_boot_options();
9672 register_snapshot_cmd();
9677 free_saved_cmdlines_buffer(savedcmd);
9678 out_free_temp_buffer:
9679 ring_buffer_free(temp_buffer);
9681 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9683 free_cpumask_var(global_trace.tracing_cpumask);
9684 out_free_buffer_mask:
9685 free_cpumask_var(tracing_buffer_mask);
9690 void __init early_trace_init(void)
9692 if (tracepoint_printk) {
9693 tracepoint_print_iter =
9694 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9695 if (MEM_FAIL(!tracepoint_print_iter,
9696 "Failed to allocate trace iterator\n"))
9697 tracepoint_printk = 0;
9699 static_key_enable(&tracepoint_printk_key.key);
9701 tracer_alloc_buffers();
9704 void __init trace_init(void)
9709 __init static int clear_boot_tracer(void)
9712 * The default bootup tracer name points into an init section buffer.
9713 * This function is called at late_initcall time. If we did not
9714 * find the boot tracer, then clear it out, to prevent
9715 * later registration from accessing the buffer that is
9716 * about to be freed.
9718 if (!default_bootup_tracer)
9721 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9722 default_bootup_tracer);
9723 default_bootup_tracer = NULL;
9728 fs_initcall(tracer_init_tracefs);
9729 late_initcall_sync(clear_boot_tracer);
9731 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9732 __init static int tracing_set_default_clock(void)
9734 /* sched_clock_stable() is determined in late_initcall */
9735 if (!trace_boot_clock && !sched_clock_stable()) {
9736 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9737 pr_warn("Can not set tracing clock due to lockdown\n");
9742 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9743 "If you want to keep using the local clock, then add:\n"
9744 " \"trace_clock=local\"\n"
9745 "on the kernel command line\n");
9746 tracing_set_clock(&global_trace, "global");
9751 late_initcall_sync(tracing_set_default_clock);