1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring-buffer to count the
64 * entries inserted during the selftest, although some concurrent
65 * insertions into the ring-buffer, such as trace_printk, could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If boot-time tracing including tracers/events via kernel cmdline
72 * is running, we do not want to run SELFTEST.
74 bool __read_mostly tracing_selftest_disabled;
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
86 /* Pipe tracepoints to printk */
87 struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
91 /* For tracers that don't implement custom flags */
92 static struct tracer_opt dummy_tracer_opt[] = {
97 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
103 * To prevent the comm cache from being overwritten when no
104 * tracing is active, only save the comm when a trace event
107 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 * Kill all tracing for good (never come back).
111 * It is initialized to 1 but will turn to zero if the initialization
112 * of the tracer is successful. But that is the only place that sets
115 static int tracing_disabled = 1;
117 cpumask_var_t __read_mostly tracing_buffer_mask;
120 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
122 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
123 * is set, then ftrace_dump is called. This will output the contents
124 * of the ftrace buffers to the console. This is very useful for
125 * capturing traces that lead to crashes and outputting them to a
128 * It is off by default, but you can enable it either by specifying
129 * "ftrace_dump_on_oops" on the kernel command line, or by setting
130 * /proc/sys/kernel/ftrace_dump_on_oops
131 * Set 1 if you want to dump the buffers of all CPUs
132 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
135 enum ftrace_dump_mode ftrace_dump_on_oops;
137 /* When set, tracing will stop when a WARN*() is hit */
138 int __disable_trace_on_warning;
140 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
141 /* Map of enums to their values, for "eval_map" file */
142 struct trace_eval_map_head {
144 unsigned long length;
147 union trace_eval_map_item;
149 struct trace_eval_map_tail {
151 * "end" is first and points to NULL as it must be different
152 * than "mod" or "eval_string"
154 union trace_eval_map_item *next;
155 const char *end; /* points to NULL */
158 static DEFINE_MUTEX(trace_eval_mutex);
161 * The trace_eval_maps are saved in an array with two extra elements,
162 * one at the beginning, and one at the end. The beginning item contains
163 * the count of the saved maps (head.length), and the module they
164 * belong to if not built in (head.mod). The ending item contains a
165 * pointer to the next array of saved eval_map items.
167 union trace_eval_map_item {
168 struct trace_eval_map map;
169 struct trace_eval_map_head head;
170 struct trace_eval_map_tail tail;
173 static union trace_eval_map_item *trace_eval_maps;
174 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
176 int tracing_set_tracer(struct trace_array *tr, const char *buf);
177 static void ftrace_trace_userstack(struct trace_array *tr,
178 struct trace_buffer *buffer,
179 unsigned int trace_ctx);
181 #define MAX_TRACER_SIZE 100
182 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
183 static char *default_bootup_tracer;
185 static bool allocate_snapshot;
187 static int __init set_cmdline_ftrace(char *str)
189 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
190 default_bootup_tracer = bootup_tracer_buf;
191 /* We are using ftrace early, expand it */
192 ring_buffer_expanded = true;
195 __setup("ftrace=", set_cmdline_ftrace);
197 static int __init set_ftrace_dump_on_oops(char *str)
199 if (*str++ != '=' || !*str) {
200 ftrace_dump_on_oops = DUMP_ALL;
204 if (!strcmp("orig_cpu", str)) {
205 ftrace_dump_on_oops = DUMP_ORIG;
211 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
213 static int __init stop_trace_on_warning(char *str)
215 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
216 __disable_trace_on_warning = 1;
219 __setup("traceoff_on_warning", stop_trace_on_warning);
221 static int __init boot_alloc_snapshot(char *str)
223 allocate_snapshot = true;
224 /* We also need the main ring buffer expanded */
225 ring_buffer_expanded = true;
228 __setup("alloc_snapshot", boot_alloc_snapshot);
231 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
233 static int __init set_trace_boot_options(char *str)
235 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
238 __setup("trace_options=", set_trace_boot_options);
240 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
241 static char *trace_boot_clock __initdata;
243 static int __init set_trace_boot_clock(char *str)
245 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
246 trace_boot_clock = trace_boot_clock_buf;
249 __setup("trace_clock=", set_trace_boot_clock);
251 static int __init set_tracepoint_printk(char *str)
253 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
254 tracepoint_printk = 1;
257 __setup("tp_printk", set_tracepoint_printk);
259 unsigned long long ns2usecs(u64 nsec)
267 trace_process_export(struct trace_export *export,
268 struct ring_buffer_event *event, int flag)
270 struct trace_entry *entry;
271 unsigned int size = 0;
273 if (export->flags & flag) {
274 entry = ring_buffer_event_data(event);
275 size = ring_buffer_event_length(event);
276 export->write(export, entry, size);
280 static DEFINE_MUTEX(ftrace_export_lock);
282 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
284 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
285 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
286 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
288 static inline void ftrace_exports_enable(struct trace_export *export)
290 if (export->flags & TRACE_EXPORT_FUNCTION)
291 static_branch_inc(&trace_function_exports_enabled);
293 if (export->flags & TRACE_EXPORT_EVENT)
294 static_branch_inc(&trace_event_exports_enabled);
296 if (export->flags & TRACE_EXPORT_MARKER)
297 static_branch_inc(&trace_marker_exports_enabled);
300 static inline void ftrace_exports_disable(struct trace_export *export)
302 if (export->flags & TRACE_EXPORT_FUNCTION)
303 static_branch_dec(&trace_function_exports_enabled);
305 if (export->flags & TRACE_EXPORT_EVENT)
306 static_branch_dec(&trace_event_exports_enabled);
308 if (export->flags & TRACE_EXPORT_MARKER)
309 static_branch_dec(&trace_marker_exports_enabled);
312 static void ftrace_exports(struct ring_buffer_event *event, int flag)
314 struct trace_export *export;
316 preempt_disable_notrace();
318 export = rcu_dereference_raw_check(ftrace_exports_list);
320 trace_process_export(export, event, flag);
321 export = rcu_dereference_raw_check(export->next);
324 preempt_enable_notrace();
328 add_trace_export(struct trace_export **list, struct trace_export *export)
330 rcu_assign_pointer(export->next, *list);
332 * We are entering export into the list but another
333 * CPU might be walking that list. We need to make sure
334 * the export->next pointer is valid before another CPU sees
335 * the export pointer included in the list.
337 rcu_assign_pointer(*list, export);
341 rm_trace_export(struct trace_export **list, struct trace_export *export)
343 struct trace_export **p;
345 for (p = list; *p != NULL; p = &(*p)->next)
352 rcu_assign_pointer(*p, (*p)->next);
358 add_ftrace_export(struct trace_export **list, struct trace_export *export)
360 ftrace_exports_enable(export);
362 add_trace_export(list, export);
366 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
370 ret = rm_trace_export(list, export);
371 ftrace_exports_disable(export);
376 int register_ftrace_export(struct trace_export *export)
378 if (WARN_ON_ONCE(!export->write))
381 mutex_lock(&ftrace_export_lock);
383 add_ftrace_export(&ftrace_exports_list, export);
385 mutex_unlock(&ftrace_export_lock);
389 EXPORT_SYMBOL_GPL(register_ftrace_export);
391 int unregister_ftrace_export(struct trace_export *export)
395 mutex_lock(&ftrace_export_lock);
397 ret = rm_ftrace_export(&ftrace_exports_list, export);
399 mutex_unlock(&ftrace_export_lock);
403 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
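/*
 * Minimal usage sketch (not part of this file): a module can mirror raw
 * trace data into its own sink by registering a trace_export.  The names
 * example_export_write and example_export are hypothetical.
 */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward 'size' bytes of the raw trace entry to an external sink */
}

static struct trace_export example_export = {
	.write	= example_export_write,
	.flags	= TRACE_EXPORT_EVENT,
};

/*
 * Register with register_ftrace_export(&example_export) and tear down
 * with unregister_ftrace_export(&example_export).
 */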
405 /* trace_flags holds trace_options default values */
406 #define TRACE_DEFAULT_FLAGS \
407 (FUNCTION_DEFAULT_FLAGS | \
408 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
409 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
410 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
411 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
414 /* trace_options that are only supported by global_trace */
415 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
416 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
418 /* trace_flags that are default zero for instances */
419 #define ZEROED_TRACE_FLAGS \
420 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
423 * The global_trace is the descriptor that holds the top-level tracing
424 * buffers for the live tracing.
426 static struct trace_array global_trace = {
427 .trace_flags = TRACE_DEFAULT_FLAGS,
430 LIST_HEAD(ftrace_trace_arrays);
432 int trace_array_get(struct trace_array *this_tr)
434 struct trace_array *tr;
437 mutex_lock(&trace_types_lock);
438 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
445 mutex_unlock(&trace_types_lock);
450 static void __trace_array_put(struct trace_array *this_tr)
452 WARN_ON(!this_tr->ref);
457 * trace_array_put - Decrement the reference counter for this trace array.
458 * @this_tr : pointer to the trace array
460 * NOTE: Use this when we no longer need the trace array returned by
461 * trace_array_get_by_name(). This ensures the trace array can be later
465 void trace_array_put(struct trace_array *this_tr)
470 mutex_lock(&trace_types_lock);
471 __trace_array_put(this_tr);
472 mutex_unlock(&trace_types_lock);
474 EXPORT_SYMBOL_GPL(trace_array_put);
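/*
 * Illustrative get/put pairing, assuming the instance API declared in
 * <linux/trace.h>; the instance name and the caller are made up.
 */
static int __maybe_unused example_use_instance(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example_instance");
	if (!tr)
		return -ENOMEM;

	trace_array_printk(tr, _THIS_IP_, "instance is alive\n");

	trace_array_put(tr);	/* drop the reference from _get_by_name() */
	return 0;
}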
476 int tracing_check_open_get_tr(struct trace_array *tr)
480 ret = security_locked_down(LOCKDOWN_TRACEFS);
484 if (tracing_disabled)
487 if (tr && trace_array_get(tr) < 0)
493 int call_filter_check_discard(struct trace_event_call *call, void *rec,
494 struct trace_buffer *buffer,
495 struct ring_buffer_event *event)
497 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
498 !filter_match_preds(call->filter, rec)) {
499 __trace_event_discard_commit(buffer, event);
506 void trace_free_pid_list(struct trace_pid_list *pid_list)
508 vfree(pid_list->pids);
513 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
514 * @filtered_pids: The list of pids to check
515 * @search_pid: The PID to find in @filtered_pids
517 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
520 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
523 * If pid_max changed after filtered_pids was created, we
524 * by default ignore all pids greater than the previous pid_max.
526 if (search_pid >= filtered_pids->pid_max)
529 return test_bit(search_pid, filtered_pids->pids);
533 * trace_ignore_this_task - should a task be ignored for tracing
534 * @filtered_pids: The list of pids to check
535 * @filtered_no_pids: The list of pids not to be traced
536 * @task: The task that should be ignored if not filtered
538 * Checks if @task should be traced or not from @filtered_pids.
539 * Returns true if @task should *NOT* be traced.
540 * Returns false if @task should be traced.
543 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
544 struct trace_pid_list *filtered_no_pids,
545 struct task_struct *task)
548 * If filtered_no_pids is not empty, and the task's pid is listed
549 * in filtered_no_pids, then return true.
550 * Otherwise, if filtered_pids is empty, that means we can
551 * trace all tasks. If it has content, then only trace pids
552 * within filtered_pids.
555 return (filtered_pids &&
556 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
558 trace_find_filtered_pid(filtered_no_pids, task->pid));
562 * trace_filter_add_remove_task - Add or remove a task from a pid_list
563 * @pid_list: The list to modify
564 * @self: The current task for fork or NULL for exit
565 * @task: The task to add or remove
567 * When adding a task, if @self is defined, the task is only added if @self
568 * is also included in @pid_list. This happens on fork and tasks should
569 * only be added when the parent is listed. If @self is NULL, then the
570 * @task pid will be removed from the list, which would happen on exit
573 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
574 struct task_struct *self,
575 struct task_struct *task)
580 /* For forks, we only add if the forking task is listed */
582 if (!trace_find_filtered_pid(pid_list, self->pid))
586 /* Sorry, but we don't support pid_max changing after setting */
587 if (task->pid >= pid_list->pid_max)
590 /* "self" is set for forks, and NULL for exits */
592 set_bit(task->pid, pid_list->pids);
594 clear_bit(task->pid, pid_list->pids);
598 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
599 * @pid_list: The pid list to show
600 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
601 * @pos: The position of the file
603 * This is used by the seq_file "next" operation to iterate the pids
604 * listed in a trace_pid_list structure.
606 * Returns the pid+1 as we want to display pid of zero, but NULL would
607 * stop the iteration.
609 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
611 unsigned long pid = (unsigned long)v;
615 /* pid already is +1 of the actual previous bit */
616 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
618 /* Return pid + 1 to allow zero to be represented */
619 if (pid < pid_list->pid_max)
620 return (void *)(pid + 1);
626 * trace_pid_start - Used for seq_file to start reading pid lists
627 * @pid_list: The pid list to show
628 * @pos: The position of the file
630 * This is used by seq_file "start" operation to start the iteration
633 * Returns the pid+1 as we want to display pid of zero, but NULL would
634 * stop the iteration.
636 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
641 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
642 if (pid >= pid_list->pid_max)
645 /* Return pid + 1 so that zero can be the exit value */
646 for (pid++; pid && l < *pos;
647 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
653 * trace_pid_show - show the current pid in seq_file processing
654 * @m: The seq_file structure to write into
655 * @v: A void pointer of the pid (+1) value to display
657 * Can be directly used by seq_file operations to display the current
660 int trace_pid_show(struct seq_file *m, void *v)
662 unsigned long pid = (unsigned long)v - 1;
664 seq_printf(m, "%lu\n", pid);
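/*
 * Illustrative seq_file glue with hypothetical names: trace_pid_show()
 * can serve as the ->show() callback directly, while start/next only
 * need thin wrappers that fetch the pid list (here stashed in
 * m->private; real users dereference the list under RCU).
 */
static void *example_pid_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_pid_list *pid_list = m->private;

	return trace_pid_start(pid_list, pos);
}

static void *example_pid_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(m->private, v, pos);
}

static void example_pid_seq_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_seq_ops = {
	.start	= example_pid_seq_start,
	.next	= example_pid_seq_next,
	.stop	= example_pid_seq_stop,
	.show	= trace_pid_show,
};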
668 /* 128 should be much more than enough */
669 #define PID_BUF_SIZE 127
671 int trace_pid_write(struct trace_pid_list *filtered_pids,
672 struct trace_pid_list **new_pid_list,
673 const char __user *ubuf, size_t cnt)
675 struct trace_pid_list *pid_list;
676 struct trace_parser parser;
684 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
688 * Always recreate a new array. The write is an all or nothing
689 * operation. Always create a new array when adding new pids by
690 * the user. If the operation fails, then the current list is
693 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
695 trace_parser_put(&parser);
699 pid_list->pid_max = READ_ONCE(pid_max);
701 /* Only truncating will shrink pid_max */
702 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
703 pid_list->pid_max = filtered_pids->pid_max;
705 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
706 if (!pid_list->pids) {
707 trace_parser_put(&parser);
713 /* copy the current bits to the new max */
714 for_each_set_bit(pid, filtered_pids->pids,
715 filtered_pids->pid_max) {
716 set_bit(pid, pid_list->pids);
725 ret = trace_get_user(&parser, ubuf, cnt, &pos);
726 if (ret < 0 || !trace_parser_loaded(&parser))
734 if (kstrtoul(parser.buffer, 0, &val))
736 if (val >= pid_list->pid_max)
741 set_bit(pid, pid_list->pids);
744 trace_parser_clear(&parser);
747 trace_parser_put(&parser);
750 trace_free_pid_list(pid_list);
755 /* Cleared the list of pids */
756 trace_free_pid_list(pid_list);
761 *new_pid_list = pid_list;
766 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
770 /* Early boot up does not have a buffer yet */
772 return trace_clock_local();
774 ts = ring_buffer_time_stamp(buf->buffer, cpu);
775 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
780 u64 ftrace_now(int cpu)
782 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
786 * tracing_is_enabled - Show if global_trace has been enabled
788 * Shows if the global trace has been enabled or not. It uses the
789 * mirror flag "buffer_disabled" so it can be checked in fast paths such as for
790 * the irqsoff tracer. But it may be inaccurate due to races. If you
791 * need to know the accurate state, use tracing_is_on() which is a little
792 * slower, but accurate.
794 int tracing_is_enabled(void)
797 * For quick access (irqsoff uses this in fast path), just
798 * return the mirror variable of the state of the ring buffer.
799 * It's a little racy, but we don't really care.
802 return !global_trace.buffer_disabled;
806 * trace_buf_size is the size in bytes that is allocated
807 * for a buffer. Note, the number of bytes is always rounded
810 * This number is purposely set to a low number of 16384.
811 * If a dump on oops happens, it will be much appreciated
812 * not to have to wait for all that output. Anyway, this can be
813 * configured at both boot time and run time.
815 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
817 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
819 /* trace_types holds a link list of available tracers. */
820 static struct tracer *trace_types __read_mostly;
823 * trace_types_lock is used to protect the trace_types list.
825 DEFINE_MUTEX(trace_types_lock);
828 * serialize the access of the ring buffer
830 * The ring buffer serializes readers, but that is only low level protection.
831 * The validity of the events (which are returned by ring_buffer_peek(), etc.)
832 * is not protected by the ring buffer.
834 * The content of events may become garbage if we allow other processes to consume
835 * these events concurrently:
836 * A) the page of the consumed events may become a normal page
837 * (not a reader page) in the ring buffer, and this page will be rewritten
838 * by the events producer.
839 * B) The page of the consumed events may become a page for splice_read,
840 * and this page will be returned to the system.
842 * These primitives allow multi-process access to different cpu ring buffers
845 * These primitives don't distinguish read-only and read-consume access.
846 * Multiple read-only accesses are also serialized.
850 static DECLARE_RWSEM(all_cpu_access_lock);
851 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
853 static inline void trace_access_lock(int cpu)
855 if (cpu == RING_BUFFER_ALL_CPUS) {
856 /* gain it for accessing the whole ring buffer. */
857 down_write(&all_cpu_access_lock);
859 /* gain it for accessing a cpu ring buffer. */
861 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
862 down_read(&all_cpu_access_lock);
864 /* Secondly block other access to this @cpu ring buffer. */
865 mutex_lock(&per_cpu(cpu_access_lock, cpu));
869 static inline void trace_access_unlock(int cpu)
871 if (cpu == RING_BUFFER_ALL_CPUS) {
872 up_write(&all_cpu_access_lock);
874 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
875 up_read(&all_cpu_access_lock);
879 static inline void trace_access_lock_init(void)
883 for_each_possible_cpu(cpu)
884 mutex_init(&per_cpu(cpu_access_lock, cpu));
889 static DEFINE_MUTEX(access_lock);
891 static inline void trace_access_lock(int cpu)
894 mutex_lock(&access_lock);
897 static inline void trace_access_unlock(int cpu)
900 mutex_unlock(&access_lock);
903 static inline void trace_access_lock_init(void)
909 #ifdef CONFIG_STACKTRACE
910 static void __ftrace_trace_stack(struct trace_buffer *buffer,
911 unsigned int trace_ctx,
912 int skip, struct pt_regs *regs);
913 static inline void ftrace_trace_stack(struct trace_array *tr,
914 struct trace_buffer *buffer,
915 unsigned int trace_ctx,
916 int skip, struct pt_regs *regs);
919 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
920 unsigned int trace_ctx,
921 int skip, struct pt_regs *regs)
924 static inline void ftrace_trace_stack(struct trace_array *tr,
925 struct trace_buffer *buffer,
926 unsigned long trace_ctx,
927 int skip, struct pt_regs *regs)
933 static __always_inline void
934 trace_event_setup(struct ring_buffer_event *event,
935 int type, unsigned int trace_ctx)
937 struct trace_entry *ent = ring_buffer_event_data(event);
939 tracing_generic_entry_update(ent, type, trace_ctx);
942 static __always_inline struct ring_buffer_event *
943 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
946 unsigned int trace_ctx)
948 struct ring_buffer_event *event;
950 event = ring_buffer_lock_reserve(buffer, len);
952 trace_event_setup(event, type, trace_ctx);
957 void tracer_tracing_on(struct trace_array *tr)
959 if (tr->array_buffer.buffer)
960 ring_buffer_record_on(tr->array_buffer.buffer);
962 * This flag is looked at when buffers haven't been allocated
963 * yet, or by some tracers (like irqsoff), that just want to
964 * know if the ring buffer has been disabled, but it can handle
965 * races of where it gets disabled but we still do a record.
966 * As the check is in the fast path of the tracers, it is more
967 * important to be fast than accurate.
969 tr->buffer_disabled = 0;
970 /* Make the flag seen by readers */
975 * tracing_on - enable tracing buffers
977 * This function enables tracing buffers that may have been
978 * disabled with tracing_off.
980 void tracing_on(void)
982 tracer_tracing_on(&global_trace);
984 EXPORT_SYMBOL_GPL(tracing_on);
987 static __always_inline void
988 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
990 __this_cpu_write(trace_taskinfo_save, true);
992 /* If this is the temp buffer, we need to commit fully */
993 if (this_cpu_read(trace_buffered_event) == event) {
994 /* Length is in event->array[0] */
995 ring_buffer_write(buffer, event->array[0], &event->array[1]);
996 /* Release the temp buffer */
997 this_cpu_dec(trace_buffered_event_cnt);
999 ring_buffer_unlock_commit(buffer, event);
1003 * __trace_puts - write a constant string into the trace buffer.
1004 * @ip: The address of the caller
1005 * @str: The constant string to write
1006 * @size: The size of the string.
1008 int __trace_puts(unsigned long ip, const char *str, int size)
1010 struct ring_buffer_event *event;
1011 struct trace_buffer *buffer;
1012 struct print_entry *entry;
1013 unsigned int trace_ctx;
1016 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1019 if (unlikely(tracing_selftest_running || tracing_disabled))
1022 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1024 trace_ctx = tracing_gen_ctx();
1025 buffer = global_trace.array_buffer.buffer;
1026 ring_buffer_nest_start(buffer);
1027 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1034 entry = ring_buffer_event_data(event);
1037 memcpy(&entry->buf, str, size);
1039 /* Add a newline if necessary */
1040 if (entry->buf[size - 1] != '\n') {
1041 entry->buf[size] = '\n';
1042 entry->buf[size + 1] = '\0';
1044 entry->buf[size] = '\0';
1046 __buffer_unlock_commit(buffer, event);
1047 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1049 ring_buffer_nest_end(buffer);
1052 EXPORT_SYMBOL_GPL(__trace_puts);
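/*
 * Callers normally reach this through the trace_puts() macro rather than
 * calling __trace_puts() directly; a minimal sketch (condition made up):
 *
 *	if (unlikely(reset_failed))
 *		trace_puts("device reset failed\n");
 *
 * With a string literal the macro records only the pointer via
 * __trace_bputs(); otherwise it copies the string via __trace_puts().
 */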
1055 * __trace_bputs - write the pointer to a constant string into trace buffer
1056 * @ip: The address of the caller
1057 * @str: The constant string to write to the buffer to
1059 int __trace_bputs(unsigned long ip, const char *str)
1061 struct ring_buffer_event *event;
1062 struct trace_buffer *buffer;
1063 struct bputs_entry *entry;
1064 unsigned int trace_ctx;
1065 int size = sizeof(struct bputs_entry);
1068 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1071 if (unlikely(tracing_selftest_running || tracing_disabled))
1074 trace_ctx = tracing_gen_ctx();
1075 buffer = global_trace.array_buffer.buffer;
1077 ring_buffer_nest_start(buffer);
1078 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1083 entry = ring_buffer_event_data(event);
1087 __buffer_unlock_commit(buffer, event);
1088 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1092 ring_buffer_nest_end(buffer);
1095 EXPORT_SYMBOL_GPL(__trace_bputs);
1097 #ifdef CONFIG_TRACER_SNAPSHOT
1098 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1101 struct tracer *tracer = tr->current_trace;
1102 unsigned long flags;
1105 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1106 internal_trace_puts("*** snapshot is being ignored ***\n");
1110 if (!tr->allocated_snapshot) {
1111 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1112 internal_trace_puts("*** stopping trace here! ***\n");
1117 /* Note, snapshot can not be used when the tracer uses it */
1118 if (tracer->use_max_tr) {
1119 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1120 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1124 local_irq_save(flags);
1125 update_max_tr(tr, current, smp_processor_id(), cond_data);
1126 local_irq_restore(flags);
1129 void tracing_snapshot_instance(struct trace_array *tr)
1131 tracing_snapshot_instance_cond(tr, NULL);
1135 * tracing_snapshot - take a snapshot of the current buffer.
1137 * This causes a swap between the snapshot buffer and the current live
1138 * tracing buffer. You can use this to take snapshots of the live
1139 * trace when some condition is triggered, but continue to trace.
1141 * Note, make sure to allocate the snapshot with either
1142 * a tracing_snapshot_alloc(), or by doing it manually
1143 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1145 * If the snapshot buffer is not allocated, it will stop tracing.
1146 * Basically making a permanent snapshot.
1148 void tracing_snapshot(void)
1150 struct trace_array *tr = &global_trace;
1152 tracing_snapshot_instance(tr);
1154 EXPORT_SYMBOL_GPL(tracing_snapshot);
1157 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1158 * @tr: The tracing instance to snapshot
1159 * @cond_data: The data to be tested conditionally, and possibly saved
1161 * This is the same as tracing_snapshot() except that the snapshot is
1162 * conditional - the snapshot will only happen if the
1163 * cond_snapshot.update() implementation receiving the cond_data
1164 * returns true, which means that the trace array's cond_snapshot
1165 * update() operation used the cond_data to determine whether the
1166 * snapshot should be taken, and if it was, presumably saved it along
1167 * with the snapshot.
1169 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1171 tracing_snapshot_instance_cond(tr, cond_data);
1173 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1176 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1177 * @tr: The tracing instance
1179 * When the user enables a conditional snapshot using
1180 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1181 * with the snapshot. This accessor is used to retrieve it.
1183 * Should not be called from cond_snapshot.update(), since it takes
1184 * the tr->max_lock lock, which the code calling
1185 * cond_snapshot.update() has already done.
1187 * Returns the cond_data associated with the trace array's snapshot.
1189 void *tracing_cond_snapshot_data(struct trace_array *tr)
1191 void *cond_data = NULL;
1193 arch_spin_lock(&tr->max_lock);
1195 if (tr->cond_snapshot)
1196 cond_data = tr->cond_snapshot->cond_data;
1198 arch_spin_unlock(&tr->max_lock);
1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1204 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1205 struct array_buffer *size_buf, int cpu_id);
1206 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1208 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1212 if (!tr->allocated_snapshot) {
1214 /* allocate spare buffer */
1215 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1216 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1220 tr->allocated_snapshot = true;
1226 static void free_snapshot(struct trace_array *tr)
1229 * We don't free the ring buffer; instead, we resize it because
1230 * the max_tr ring buffer has some state (e.g. ring->clock) and
1231 * we want to preserve it.
1233 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1234 set_buffer_entries(&tr->max_buffer, 1);
1235 tracing_reset_online_cpus(&tr->max_buffer);
1236 tr->allocated_snapshot = false;
1240 * tracing_alloc_snapshot - allocate snapshot buffer.
1242 * This only allocates the snapshot buffer if it isn't already
1243 * allocated - it doesn't also take a snapshot.
1245 * This is meant to be used in cases where the snapshot buffer needs
1246 * to be set up for events that can't sleep but need to be able to
1247 * trigger a snapshot.
1249 int tracing_alloc_snapshot(void)
1251 struct trace_array *tr = &global_trace;
1254 ret = tracing_alloc_snapshot_instance(tr);
1259 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1262 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1264 * This is similar to tracing_snapshot(), but it will allocate the
1265 * snapshot buffer if it isn't already allocated. Use this only
1266 * where it is safe to sleep, as the allocation may sleep.
1268 * This causes a swap between the snapshot buffer and the current live
1269 * tracing buffer. You can use this to take snapshots of the live
1270 * trace when some condition is triggered, but continue to trace.
1272 void tracing_snapshot_alloc(void)
1276 ret = tracing_alloc_snapshot();
1282 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
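/*
 * Illustrative sequence with hypothetical callers: allocate the snapshot
 * buffer once from a context that may sleep, then trigger snapshots from
 * the hot path when the interesting condition fires.
 */
static int __maybe_unused example_snapshot_setup(void)
{
	/* may sleep; typically done at init time */
	return tracing_alloc_snapshot();
}

static void __maybe_unused example_snapshot_hit(void)
{
	/* safe even from atomic context once the buffer is allocated */
	tracing_snapshot();
}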
1285 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1286 * @tr: The tracing instance
1287 * @cond_data: User data to associate with the snapshot
1288 * @update: Implementation of the cond_snapshot update function
1290 * Check whether the conditional snapshot for the given instance has
1291 * already been enabled, or if the current tracer is already using a
1292 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1293 * save the cond_data and update function inside.
1295 * Returns 0 if successful, error otherwise.
1297 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1298 cond_update_fn_t update)
1300 struct cond_snapshot *cond_snapshot;
1303 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1307 cond_snapshot->cond_data = cond_data;
1308 cond_snapshot->update = update;
1310 mutex_lock(&trace_types_lock);
1312 ret = tracing_alloc_snapshot_instance(tr);
1316 if (tr->current_trace->use_max_tr) {
1322 * The cond_snapshot can only change to NULL without the
1323 * trace_types_lock. We don't care if we race with it going
1324 * to NULL, but we want to make sure that it's not set to
1325 * something other than NULL when we get here, which we can
1326 * do safely with only holding the trace_types_lock and not
1327 * having to take the max_lock.
1329 if (tr->cond_snapshot) {
1334 arch_spin_lock(&tr->max_lock);
1335 tr->cond_snapshot = cond_snapshot;
1336 arch_spin_unlock(&tr->max_lock);
1338 mutex_unlock(&trace_types_lock);
1343 mutex_unlock(&trace_types_lock);
1344 kfree(cond_snapshot);
1347 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
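/*
 * Illustrative conditional-snapshot callback; the threshold and names are
 * made up.  The cond_data seen here is whatever the caller passed to
 * tracing_snapshot_cond(), not the value saved at enable time.
 */
static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	unsigned long latency_us = (unsigned long)cond_data;

	return latency_us > 500;	/* only snapshot the slow cases */
}

/*
 * Hedged usage: once, from process context,
 *	tracing_snapshot_cond_enable(tr, NULL, example_cond_update);
 * then in the measured path,
 *	tracing_snapshot_cond(tr, (void *)latency_us);
 */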
1350 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1351 * @tr: The tracing instance
1353 * Check whether the conditional snapshot for the given instance is
1354 * enabled; if so, free the cond_snapshot associated with it,
1355 * otherwise return -EINVAL.
1357 * Returns 0 if successful, error otherwise.
1359 int tracing_snapshot_cond_disable(struct trace_array *tr)
1363 arch_spin_lock(&tr->max_lock);
1365 if (!tr->cond_snapshot)
1368 kfree(tr->cond_snapshot);
1369 tr->cond_snapshot = NULL;
1372 arch_spin_unlock(&tr->max_lock);
1376 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1378 void tracing_snapshot(void)
1380 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1382 EXPORT_SYMBOL_GPL(tracing_snapshot);
1383 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1385 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1387 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1388 int tracing_alloc_snapshot(void)
1390 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1393 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1394 void tracing_snapshot_alloc(void)
1399 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1400 void *tracing_cond_snapshot_data(struct trace_array *tr)
1404 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1405 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1409 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1410 int tracing_snapshot_cond_disable(struct trace_array *tr)
1414 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1415 #endif /* CONFIG_TRACER_SNAPSHOT */
1417 void tracer_tracing_off(struct trace_array *tr)
1419 if (tr->array_buffer.buffer)
1420 ring_buffer_record_off(tr->array_buffer.buffer);
1422 * This flag is looked at when buffers haven't been allocated
1423 * yet, or by some tracers (like irqsoff), that just want to
1424 * know if the ring buffer has been disabled, but it can handle
1425 * races of where it gets disabled but we still do a record.
1426 * As the check is in the fast path of the tracers, it is more
1427 * important to be fast than accurate.
1429 tr->buffer_disabled = 1;
1430 /* Make the flag seen by readers */
1435 * tracing_off - turn off tracing buffers
1437 * This function stops the tracing buffers from recording data.
1438 * It does not disable any overhead the tracers themselves may
1439 * be causing. This function simply causes all recording to
1440 * the ring buffers to fail.
1442 void tracing_off(void)
1444 tracer_tracing_off(&global_trace);
1446 EXPORT_SYMBOL_GPL(tracing_off);
1448 void disable_trace_on_warning(void)
1450 if (__disable_trace_on_warning) {
1451 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1452 "Disabling tracing due to warning\n");
1458 * tracer_tracing_is_on - show real state of ring buffer enabled
1459 * @tr : the trace array to know if ring buffer is enabled
1461 * Shows the real state of the ring buffer: whether it is enabled or not.
1463 bool tracer_tracing_is_on(struct trace_array *tr)
1465 if (tr->array_buffer.buffer)
1466 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1467 return !tr->buffer_disabled;
1471 * tracing_is_on - show state of ring buffers enabled
1473 int tracing_is_on(void)
1475 return tracer_tracing_is_on(&global_trace);
1477 EXPORT_SYMBOL_GPL(tracing_is_on);
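/*
 * Illustrative sketch: a debugging check can freeze the ring buffer the
 * moment a bad state is seen, so the events leading up to it remain
 * readable.  The condition and message are hypothetical.
 */
static inline void example_freeze_on_bad_state(bool bad_state)
{
	if (unlikely(bad_state) && tracing_is_on()) {
		trace_printk("bad state hit, freezing trace buffer\n");
		tracing_off();
	}
}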
1479 static int __init set_buf_size(char *str)
1481 unsigned long buf_size;
1485 buf_size = memparse(str, &str);
1486 /* nr_entries can not be zero */
1489 trace_buf_size = buf_size;
1492 __setup("trace_buf_size=", set_buf_size);
1494 static int __init set_tracing_thresh(char *str)
1496 unsigned long threshold;
1501 ret = kstrtoul(str, 0, &threshold);
1504 tracing_thresh = threshold * 1000;
1507 __setup("tracing_thresh=", set_tracing_thresh);
1509 unsigned long nsecs_to_usecs(unsigned long nsecs)
1511 return nsecs / 1000;
1515 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1516 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1517 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1518 * of strings in the order that the evals (enum) were defined.
1523 /* These must match the bit positions in trace_iterator_flags */
1524 static const char *trace_options[] = {
1532 int in_ns; /* is this clock in nanoseconds? */
1533 } trace_clocks[] = {
1534 { trace_clock_local, "local", 1 },
1535 { trace_clock_global, "global", 1 },
1536 { trace_clock_counter, "counter", 0 },
1537 { trace_clock_jiffies, "uptime", 0 },
1538 { trace_clock, "perf", 1 },
1539 { ktime_get_mono_fast_ns, "mono", 1 },
1540 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1541 { ktime_get_boot_fast_ns, "boot", 1 },
1545 bool trace_clock_in_ns(struct trace_array *tr)
1547 if (trace_clocks[tr->clock_id].in_ns)
1554 * trace_parser_get_init - gets the buffer for trace parser
1556 int trace_parser_get_init(struct trace_parser *parser, int size)
1558 memset(parser, 0, sizeof(*parser));
1560 parser->buffer = kmalloc(size, GFP_KERNEL);
1561 if (!parser->buffer)
1564 parser->size = size;
1569 * trace_parser_put - frees the buffer for trace parser
1571 void trace_parser_put(struct trace_parser *parser)
1573 kfree(parser->buffer);
1574 parser->buffer = NULL;
1578 * trace_get_user - reads the user input string separated by space
1579 * (matched by isspace(ch))
1581 * For each string found the 'struct trace_parser' is updated,
1582 * and the function returns.
1584 * Returns number of bytes read.
1586 * See kernel/trace/trace.h for 'struct trace_parser' details.
1588 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1589 size_t cnt, loff_t *ppos)
1596 trace_parser_clear(parser);
1598 ret = get_user(ch, ubuf++);
1606 * If the parser hasn't finished with the last write,
1607 * continue reading the user input without skipping spaces.
1609 if (!parser->cont) {
1610 /* skip white space */
1611 while (cnt && isspace(ch)) {
1612 ret = get_user(ch, ubuf++);
1621 /* only spaces were written */
1622 if (isspace(ch) || !ch) {
1629 /* read the non-space input */
1630 while (cnt && !isspace(ch) && ch) {
1631 if (parser->idx < parser->size - 1)
1632 parser->buffer[parser->idx++] = ch;
1637 ret = get_user(ch, ubuf++);
1644 /* We either got finished input or we have to wait for another call. */
1645 if (isspace(ch) || !ch) {
1646 parser->buffer[parser->idx] = 0;
1647 parser->cont = false;
1648 } else if (parser->idx < parser->size - 1) {
1649 parser->cont = true;
1650 parser->buffer[parser->idx++] = ch;
1651 /* Make sure the parsed string always terminates with '\0'. */
1652 parser->buffer[parser->idx] = 0;
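/*
 * Minimal consumer sketch for the parser above, with hypothetical names
 * and modelled on trace_pid_write(): pull whitespace-separated tokens
 * out of a user buffer one at a time.
 */
static ssize_t __maybe_unused example_parse_tokens(const char __user *ubuf,
						   size_t cnt)
{
	struct trace_parser parser;
	ssize_t read = 0;
	loff_t pos;
	int ret = 0;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;
		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		/* parser.buffer now holds one NUL-terminated token */

		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);
	return ret < 0 ? ret : read;
}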
1665 /* TODO add a seq_buf_to_buffer() */
1666 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1670 if (trace_seq_used(s) <= s->seq.readpos)
1673 len = trace_seq_used(s) - s->seq.readpos;
1676 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1678 s->seq.readpos += cnt;
1682 unsigned long __read_mostly tracing_thresh;
1683 static const struct file_operations tracing_max_lat_fops;
1685 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1686 defined(CONFIG_FSNOTIFY)
1688 static struct workqueue_struct *fsnotify_wq;
1690 static void latency_fsnotify_workfn(struct work_struct *work)
1692 struct trace_array *tr = container_of(work, struct trace_array,
1694 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1697 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1699 struct trace_array *tr = container_of(iwork, struct trace_array,
1701 queue_work(fsnotify_wq, &tr->fsnotify_work);
1704 static void trace_create_maxlat_file(struct trace_array *tr,
1705 struct dentry *d_tracer)
1707 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1708 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1709 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1710 d_tracer, &tr->max_latency,
1711 &tracing_max_lat_fops);
1714 __init static int latency_fsnotify_init(void)
1716 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1717 WQ_UNBOUND | WQ_HIGHPRI, 0);
1719 pr_err("Unable to allocate tr_max_lat_wq\n");
1725 late_initcall_sync(latency_fsnotify_init);
1727 void latency_fsnotify(struct trace_array *tr)
1732 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1733 * possible that we are called from __schedule() or do_idle(), which
1734 * could cause a deadlock.
1736 irq_work_queue(&tr->fsnotify_irqwork);
1740 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1741 * defined(CONFIG_FSNOTIFY)
1745 #define trace_create_maxlat_file(tr, d_tracer) \
1746 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1747 &tr->max_latency, &tracing_max_lat_fops)
1751 #ifdef CONFIG_TRACER_MAX_TRACE
1753 * Copy the new maximum trace into the separate maximum-trace
1754 * structure. (this way the maximum trace is permanently saved,
1755 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1758 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1760 struct array_buffer *trace_buf = &tr->array_buffer;
1761 struct array_buffer *max_buf = &tr->max_buffer;
1762 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1763 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1766 max_buf->time_start = data->preempt_timestamp;
1768 max_data->saved_latency = tr->max_latency;
1769 max_data->critical_start = data->critical_start;
1770 max_data->critical_end = data->critical_end;
1772 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1773 max_data->pid = tsk->pid;
1775 * If tsk == current, then use current_uid(), as that does not use
1776 * RCU. The irq tracer can be called out of RCU scope.
1779 max_data->uid = current_uid();
1781 max_data->uid = task_uid(tsk);
1783 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1784 max_data->policy = tsk->policy;
1785 max_data->rt_priority = tsk->rt_priority;
1787 /* record this task's comm */
1788 tracing_record_cmdline(tsk);
1789 latency_fsnotify(tr);
1793 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1795 * @tsk: the task with the latency
1796 * @cpu: The cpu that initiated the trace.
1797 * @cond_data: User data associated with a conditional snapshot
1799 * Flip the buffers between the @tr and the max_tr and record information
1800 * about which task was the cause of this latency.
1803 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1809 WARN_ON_ONCE(!irqs_disabled());
1811 if (!tr->allocated_snapshot) {
1812 /* Only the nop tracer should hit this when disabling */
1813 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1817 arch_spin_lock(&tr->max_lock);
1819 /* Inherit the recordable setting from array_buffer */
1820 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1821 ring_buffer_record_on(tr->max_buffer.buffer);
1823 ring_buffer_record_off(tr->max_buffer.buffer);
1825 #ifdef CONFIG_TRACER_SNAPSHOT
1826 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1829 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1831 __update_max_tr(tr, tsk, cpu);
1834 arch_spin_unlock(&tr->max_lock);
1838 * update_max_tr_single - only copy one trace over, and reset the rest
1840 * @tsk: task with the latency
1841 * @cpu: the cpu of the buffer to copy.
1843 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1846 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1853 WARN_ON_ONCE(!irqs_disabled());
1854 if (!tr->allocated_snapshot) {
1855 /* Only the nop tracer should hit this when disabling */
1856 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1860 arch_spin_lock(&tr->max_lock);
1862 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1864 if (ret == -EBUSY) {
1866 * We failed to swap the buffer due to a commit taking
1867 * place on this CPU. We fail to record, but we reset
1868 * the max trace buffer (no one writes directly to it)
1869 * and flag that it failed.
1871 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1872 "Failed to swap buffers due to commit in progress\n");
1875 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1877 __update_max_tr(tr, tsk, cpu);
1878 arch_spin_unlock(&tr->max_lock);
1880 #endif /* CONFIG_TRACER_MAX_TRACE */
1882 static int wait_on_pipe(struct trace_iterator *iter, int full)
1884 /* Iterators are static, they should be filled or empty */
1885 if (trace_buffer_iter(iter, iter->cpu_file))
1888 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1892 #ifdef CONFIG_FTRACE_STARTUP_TEST
1893 static bool selftests_can_run;
1895 struct trace_selftests {
1896 struct list_head list;
1897 struct tracer *type;
1900 static LIST_HEAD(postponed_selftests);
1902 static int save_selftest(struct tracer *type)
1904 struct trace_selftests *selftest;
1906 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1910 selftest->type = type;
1911 list_add(&selftest->list, &postponed_selftests);
1915 static int run_tracer_selftest(struct tracer *type)
1917 struct trace_array *tr = &global_trace;
1918 struct tracer *saved_tracer = tr->current_trace;
1921 if (!type->selftest || tracing_selftest_disabled)
1925 * If a tracer registers early in boot up (before scheduling is
1926 * initialized and such), then do not run its selftests yet.
1927 * Instead, run it a little later in the boot process.
1929 if (!selftests_can_run)
1930 return save_selftest(type);
1932 if (!tracing_is_on()) {
1933 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1939 * Run a selftest on this tracer.
1940 * Here we reset the trace buffer, and set the current
1941 * tracer to be this tracer. The tracer can then run some
1942 * internal tracing to verify that everything is in order.
1943 * If we fail, we do not register this tracer.
1945 tracing_reset_online_cpus(&tr->array_buffer);
1947 tr->current_trace = type;
1949 #ifdef CONFIG_TRACER_MAX_TRACE
1950 if (type->use_max_tr) {
1951 /* If we expanded the buffers, make sure the max is expanded too */
1952 if (ring_buffer_expanded)
1953 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1954 RING_BUFFER_ALL_CPUS);
1955 tr->allocated_snapshot = true;
1959 /* the test is responsible for initializing and enabling */
1960 pr_info("Testing tracer %s: ", type->name);
1961 ret = type->selftest(type, tr);
1962 /* the test is responsible for resetting too */
1963 tr->current_trace = saved_tracer;
1965 printk(KERN_CONT "FAILED!\n");
1966 /* Add the warning after printing 'FAILED' */
1970 /* Only reset on passing, to avoid touching corrupted buffers */
1971 tracing_reset_online_cpus(&tr->array_buffer);
1973 #ifdef CONFIG_TRACER_MAX_TRACE
1974 if (type->use_max_tr) {
1975 tr->allocated_snapshot = false;
1977 /* Shrink the max buffer again */
1978 if (ring_buffer_expanded)
1979 ring_buffer_resize(tr->max_buffer.buffer, 1,
1980 RING_BUFFER_ALL_CPUS);
1984 printk(KERN_CONT "PASSED\n");
1988 static __init int init_trace_selftests(void)
1990 struct trace_selftests *p, *n;
1991 struct tracer *t, **last;
1994 selftests_can_run = true;
1996 mutex_lock(&trace_types_lock);
1998 if (list_empty(&postponed_selftests))
2001 pr_info("Running postponed tracer tests:\n");
2003 tracing_selftest_running = true;
2004 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2005 /* This loop can take minutes when sanitizers are enabled, so
2006 * let's make sure we allow RCU processing.
2009 ret = run_tracer_selftest(p->type);
2010 /* If the test fails, then warn and remove from available_tracers */
2012 WARN(1, "tracer: %s failed selftest, disabling\n",
2014 last = &trace_types;
2015 for (t = trace_types; t; t = t->next) {
2026 tracing_selftest_running = false;
2029 mutex_unlock(&trace_types_lock);
2033 core_initcall(init_trace_selftests);
2035 static inline int run_tracer_selftest(struct tracer *type)
2039 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2041 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2043 static void __init apply_trace_boot_options(void);
2046 * register_tracer - register a tracer with the ftrace system.
2047 * @type: the plugin for the tracer
2049 * Register a new plugin tracer.
2051 int __init register_tracer(struct tracer *type)
2057 pr_info("Tracer must have a name\n");
2061 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2062 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2066 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2067 pr_warn("Can not register tracer %s due to lockdown\n",
2072 mutex_lock(&trace_types_lock);
2074 tracing_selftest_running = true;
2076 for (t = trace_types; t; t = t->next) {
2077 if (strcmp(type->name, t->name) == 0) {
2079 pr_info("Tracer %s already registered\n",
2086 if (!type->set_flag)
2087 type->set_flag = &dummy_set_flag;
2089 /* allocate a dummy tracer_flags */
2090 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2095 type->flags->val = 0;
2096 type->flags->opts = dummy_tracer_opt;
2098 if (!type->flags->opts)
2099 type->flags->opts = dummy_tracer_opt;
2101 /* store the tracer for __set_tracer_option */
2102 type->flags->trace = type;
2104 ret = run_tracer_selftest(type);
2108 type->next = trace_types;
2110 add_tracer_options(&global_trace, type);
2113 tracing_selftest_running = false;
2114 mutex_unlock(&trace_types_lock);
2116 if (ret || !default_bootup_tracer)
2119 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2122 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2123 /* Do we want this tracer to start on bootup? */
2124 tracing_set_tracer(&global_trace, type->name);
2125 default_bootup_tracer = NULL;
2127 apply_trace_boot_options();
2129 /* disable other selftests, since this will break it. */
2130 disable_tracing_selftest("running a tracer");
2136 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2138 struct trace_buffer *buffer = buf->buffer;
2143 ring_buffer_record_disable(buffer);
2145 /* Make sure all commits have finished */
2147 ring_buffer_reset_cpu(buffer, cpu);
2149 ring_buffer_record_enable(buffer);
2152 void tracing_reset_online_cpus(struct array_buffer *buf)
2154 struct trace_buffer *buffer = buf->buffer;
2159 ring_buffer_record_disable(buffer);
2161 /* Make sure all commits have finished */
2164 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2166 ring_buffer_reset_online_cpus(buffer);
2168 ring_buffer_record_enable(buffer);
2171 /* Must have trace_types_lock held */
2172 void tracing_reset_all_online_cpus(void)
2174 struct trace_array *tr;
2176 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2177 if (!tr->clear_trace)
2179 tr->clear_trace = false;
2180 tracing_reset_online_cpus(&tr->array_buffer);
2181 #ifdef CONFIG_TRACER_MAX_TRACE
2182 tracing_reset_online_cpus(&tr->max_buffer);
2187 static int *tgid_map;
2189 #define SAVED_CMDLINES_DEFAULT 128
2190 #define NO_CMDLINE_MAP UINT_MAX
2191 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2192 struct saved_cmdlines_buffer {
2193 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2194 unsigned *map_cmdline_to_pid;
2195 unsigned cmdline_num;
2197 char *saved_cmdlines;
2199 static struct saved_cmdlines_buffer *savedcmd;
2201 /* temporarily disable recording */
2202 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2204 static inline char *get_saved_cmdlines(int idx)
2206 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2209 static inline void set_cmdline(int idx, const char *cmdline)
2211 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2214 static int allocate_cmdlines_buffer(unsigned int val,
2215 struct saved_cmdlines_buffer *s)
2217 s->map_cmdline_to_pid = kmalloc_array(val,
2218 sizeof(*s->map_cmdline_to_pid),
2220 if (!s->map_cmdline_to_pid)
2223 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2224 if (!s->saved_cmdlines) {
2225 kfree(s->map_cmdline_to_pid);
2230 s->cmdline_num = val;
2231 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2232 sizeof(s->map_pid_to_cmdline));
2233 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2234 val * sizeof(*s->map_cmdline_to_pid));
2239 static int trace_create_savedcmd(void)
2243 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2247 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2257 int is_tracing_stopped(void)
2259 return global_trace.stop_count;
2263 * tracing_start - quick start of the tracer
2265 * If tracing is enabled but was stopped by tracing_stop,
2266 * this will start the tracer back up.
2268 void tracing_start(void)
2270 struct trace_buffer *buffer;
2271 unsigned long flags;
2273 if (tracing_disabled)
2276 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2277 if (--global_trace.stop_count) {
2278 if (global_trace.stop_count < 0) {
2279 /* Someone screwed up their debugging */
2281 global_trace.stop_count = 0;
2286 /* Prevent the buffers from switching */
2287 arch_spin_lock(&global_trace.max_lock);
2289 buffer = global_trace.array_buffer.buffer;
2291 ring_buffer_record_enable(buffer);
2293 #ifdef CONFIG_TRACER_MAX_TRACE
2294 buffer = global_trace.max_buffer.buffer;
2296 ring_buffer_record_enable(buffer);
2299 arch_spin_unlock(&global_trace.max_lock);
2302 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2305 static void tracing_start_tr(struct trace_array *tr)
2307 struct trace_buffer *buffer;
2308 unsigned long flags;
2310 if (tracing_disabled)
2313 /* If global, we need to also start the max tracer */
2314 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2315 return tracing_start();
2317 raw_spin_lock_irqsave(&tr->start_lock, flags);
2319 if (--tr->stop_count) {
2320 if (tr->stop_count < 0) {
2321 /* Someone screwed up their debugging */
2328 buffer = tr->array_buffer.buffer;
2330 ring_buffer_record_enable(buffer);
2333 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2337 * tracing_stop - quick stop of the tracer
2339 * Light weight way to stop tracing. Use in conjunction with
2342 void tracing_stop(void)
2344 struct trace_buffer *buffer;
2345 unsigned long flags;
2347 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2348 if (global_trace.stop_count++)
2351 /* Prevent the buffers from switching */
2352 arch_spin_lock(&global_trace.max_lock);
2354 buffer = global_trace.array_buffer.buffer;
2356 ring_buffer_record_disable(buffer);
2358 #ifdef CONFIG_TRACER_MAX_TRACE
2359 buffer = global_trace.max_buffer.buffer;
2361 ring_buffer_record_disable(buffer);
2364 arch_spin_unlock(&global_trace.max_lock);
2367 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
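/*
 * Illustrative pairing (hypothetical caller): bracket a section whose
 * events should not land in the buffers, then resume.  Unlike
 * tracing_off(), stop/start nest via the stop_count handling above.
 */
static void __maybe_unused example_quiet_section(void)
{
	tracing_stop();
	/* ... noisy work we do not want traced ... */
	tracing_start();
}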
2370 static void tracing_stop_tr(struct trace_array *tr)
2372 struct trace_buffer *buffer;
2373 unsigned long flags;
2375 /* If global, we need to also stop the max tracer */
2376 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2377 return tracing_stop();
2379 raw_spin_lock_irqsave(&tr->start_lock, flags);
2380 if (tr->stop_count++)
2383 buffer = tr->array_buffer.buffer;
2385 ring_buffer_record_disable(buffer);
2388 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2391 static int trace_save_cmdline(struct task_struct *tsk)
2395 /* treat recording of idle task as a success */
2399 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2403 * It's not the end of the world if we don't get
2404 * the lock, but we also don't want to spin
2405 * nor do we want to disable interrupts,
2406 * so if we miss here, then better luck next time.
2408 if (!arch_spin_trylock(&trace_cmdline_lock))
2411 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2412 if (idx == NO_CMDLINE_MAP) {
2413 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2416 * Check whether the cmdline buffer at idx has a pid
2417 * mapped. We are going to overwrite that entry so we
2418 * need to clear the map_pid_to_cmdline. Otherwise we
2419 * would read the new comm for the old pid.
2421 pid = savedcmd->map_cmdline_to_pid[idx];
2422 if (pid != NO_CMDLINE_MAP)
2423 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2425 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2426 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2428 savedcmd->cmdline_idx = idx;
2431 set_cmdline(idx, tsk->comm);
2433 arch_spin_unlock(&trace_cmdline_lock);
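/*
 * Illustrative sketch, not part of the original file: the saved
 * cmdline table behaves like a small ring indexed modulo cmdline_num.
 * Assuming the default of 128 slots (SAVED_CMDLINES_DEFAULT), saving a
 * comm when cmdline_idx is 127 wraps back to slot 0, and the pid that
 * previously owned that slot is unmapped first so a stale comm is
 * never reported for it:
 *
 *	idx = (127 + 1) % 128;			(wraps to slot 0)
 *	pid = map_cmdline_to_pid[0];		(previous owner)
 *	map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
 */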
2438 static void __trace_find_cmdline(int pid, char comm[])
2443 strcpy(comm, "<idle>");
2447 if (WARN_ON_ONCE(pid < 0)) {
2448 strcpy(comm, "<XXX>");
2452 if (pid > PID_MAX_DEFAULT) {
2453 strcpy(comm, "<...>");
2457 map = savedcmd->map_pid_to_cmdline[pid];
2458 if (map != NO_CMDLINE_MAP)
2459 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2461 strcpy(comm, "<...>");
2464 void trace_find_cmdline(int pid, char comm[])
2467 arch_spin_lock(&trace_cmdline_lock);
2469 __trace_find_cmdline(pid, comm);
2471 arch_spin_unlock(&trace_cmdline_lock);
2475 int trace_find_tgid(int pid)
2477 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2480 return tgid_map[pid];
2483 static int trace_save_tgid(struct task_struct *tsk)
2485 /* treat recording of idle task as a success */
2489 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2492 tgid_map[tsk->pid] = tsk->tgid;
2496 static bool tracing_record_taskinfo_skip(int flags)
2498 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2500 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2502 if (!__this_cpu_read(trace_taskinfo_save))
2508 * tracing_record_taskinfo - record the task info of a task
2510 * @task: task to record
2511 * @flags: TRACE_RECORD_CMDLINE for recording comm
2512 * TRACE_RECORD_TGID for recording tgid
2514 void tracing_record_taskinfo(struct task_struct *task, int flags)
2518 if (tracing_record_taskinfo_skip(flags))
2522 * Record as much task information as possible. If some fail, continue
2523 * to try to record the others.
2525 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2526 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2528 /* If recording any information failed, retry soon. */
2532 __this_cpu_write(trace_taskinfo_save, false);
2536 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2538 * @prev: previous task during sched_switch
2539 * @next: next task during sched_switch
2540 * @flags: TRACE_RECORD_CMDLINE for recording comm
2541 * TRACE_RECORD_TGID for recording tgid
2543 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2544 struct task_struct *next, int flags)
2548 if (tracing_record_taskinfo_skip(flags))
2552 * Record as much task information as possible. If some fail, continue
2553 * to try to record the others.
2555 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2556 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2557 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2558 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2560 /* If recording any information failed, retry soon. */
2564 __this_cpu_write(trace_taskinfo_save, false);
2567 /* Helpers to record a specific task information */
2568 void tracing_record_cmdline(struct task_struct *task)
2570 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2573 void tracing_record_tgid(struct task_struct *task)
2575 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
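/*
 * Illustrative sketch, not part of the original file: a scheduling
 * hook that wants both the comm and the tgid cached for later output
 * would typically use the sched_switch helper above:
 *
 *	tracing_record_taskinfo_sched_switch(prev, next,
 *			TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID);
 *
 * If any part of the recording fails, trace_taskinfo_save is left set
 * so the attempt is retried on a later event.
 */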
2579 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2580 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2581 * simplifies those functions and keeps them in sync.
2583 enum print_line_t trace_handle_return(struct trace_seq *s)
2585 return trace_seq_has_overflowed(s) ?
2586 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2588 EXPORT_SYMBOL_GPL(trace_handle_return);
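/*
 * Illustrative sketch, not part of the original file: an event output
 * callback can funnel its trace_seq result through trace_handle_return()
 * instead of open coding the overflow check.  foo_trace() below is a
 * hypothetical callback used only for illustration:
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 42);
 *		return trace_handle_return(&iter->seq);
 *	}
 */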
2590 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2592 unsigned int trace_flags = irqs_status;
2595 pc = preempt_count();
2598 trace_flags |= TRACE_FLAG_NMI;
2599 if (pc & HARDIRQ_MASK)
2600 trace_flags |= TRACE_FLAG_HARDIRQ;
2601 if (in_serving_softirq())
2602 trace_flags |= TRACE_FLAG_SOFTIRQ;
2604 if (tif_need_resched())
2605 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2606 if (test_preempt_need_resched())
2607 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2608 return (trace_flags << 16) | (pc & 0xff);
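/*
 * Illustrative sketch, not part of the original file: the returned
 * value packs the preempt count in the low byte and the TRACE_FLAG_*
 * bits in the upper half, so a consumer can unpack it as:
 *
 *	unsigned int trace_ctx = tracing_gen_ctx_irq_test(irqs_status);
 *	unsigned char pc    = trace_ctx & 0xff;    (preempt count)
 *	unsigned int  flags = trace_ctx >> 16;     (TRACE_FLAG_* bits)
 */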
2611 struct ring_buffer_event *
2612 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2615 unsigned int trace_ctx)
2617 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2620 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2621 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2622 static int trace_buffered_event_ref;
2625 * trace_buffered_event_enable - enable buffering events
2627 * When events are being filtered, it is quicker to use a temporary
2628 * buffer to write the event data into if there's a likely chance
2629 * that it will not be committed. The discard of the ring buffer
2630 * is not as fast as committing, and is much slower than copying to the temp buffer.
2633 * When an event is to be filtered, allocate per cpu buffers to
2634 * write the event data into, and if the event is filtered and discarded
2635 * it is simply dropped; otherwise, the entire data is committed to the buffer.
2638 void trace_buffered_event_enable(void)
2640 struct ring_buffer_event *event;
2644 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2646 if (trace_buffered_event_ref++)
2649 for_each_tracing_cpu(cpu) {
2650 page = alloc_pages_node(cpu_to_node(cpu),
2651 GFP_KERNEL | __GFP_NORETRY, 0);
2655 event = page_address(page);
2656 memset(event, 0, sizeof(*event));
2658 per_cpu(trace_buffered_event, cpu) = event;
2661 if (cpu == smp_processor_id() &&
2662 __this_cpu_read(trace_buffered_event) !=
2663 per_cpu(trace_buffered_event, cpu))
2670 trace_buffered_event_disable();
2673 static void enable_trace_buffered_event(void *data)
2675 /* Probably not needed, but do it anyway */
2677 this_cpu_dec(trace_buffered_event_cnt);
2680 static void disable_trace_buffered_event(void *data)
2682 this_cpu_inc(trace_buffered_event_cnt);
2686 * trace_buffered_event_disable - disable buffering events
2688 * When a filter is removed, it is faster to not use the buffered
2689 * events, and to commit directly into the ring buffer. Free up
2690 * the temp buffers when there are no more users. This requires
2691 * special synchronization with current events.
2693 void trace_buffered_event_disable(void)
2697 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2699 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2702 if (--trace_buffered_event_ref)
2706 /* For each CPU, set the buffer as used. */
2707 smp_call_function_many(tracing_buffer_mask,
2708 disable_trace_buffered_event, NULL, 1);
2711 /* Wait for all current users to finish */
2714 for_each_tracing_cpu(cpu) {
2715 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2716 per_cpu(trace_buffered_event, cpu) = NULL;
2719 * Make sure trace_buffered_event is NULL before clearing
2720 * trace_buffered_event_cnt.
2725 /* Do the work on each cpu */
2726 smp_call_function_many(tracing_buffer_mask,
2727 enable_trace_buffered_event, NULL, 1);
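/*
 * Illustrative sketch, not part of the original file: both helpers
 * above expect event_mutex to be held, and the reference count makes
 * one enable/disable pair per filter user safe:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();     (when a filter is added)
 *	...
 *	trace_buffered_event_disable();    (when the filter is removed)
 *	mutex_unlock(&event_mutex);
 */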
2731 static struct trace_buffer *temp_buffer;
2733 struct ring_buffer_event *
2734 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2735 struct trace_event_file *trace_file,
2736 int type, unsigned long len,
2737 unsigned int trace_ctx)
2739 struct ring_buffer_event *entry;
2742 *current_rb = trace_file->tr->array_buffer.buffer;
2744 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2745 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2746 (entry = this_cpu_read(trace_buffered_event))) {
2747 /* Try to use the per cpu buffer first */
2748 val = this_cpu_inc_return(trace_buffered_event_cnt);
2749 if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
2750 trace_event_setup(entry, type, trace_ctx);
2751 entry->array[0] = len;
2754 this_cpu_dec(trace_buffered_event_cnt);
2757 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2760 * If tracing is off, but we have triggers enabled
2761 * we still need to look at the event data. Use the temp_buffer
2762 * to store the trace event for the trigger to use. It's recursion
2763 * safe and will not be recorded anywhere.
2765 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2766 *current_rb = temp_buffer;
2767 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2772 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2774 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2775 static DEFINE_MUTEX(tracepoint_printk_mutex);
2777 static void output_printk(struct trace_event_buffer *fbuffer)
2779 struct trace_event_call *event_call;
2780 struct trace_event_file *file;
2781 struct trace_event *event;
2782 unsigned long flags;
2783 struct trace_iterator *iter = tracepoint_print_iter;
2785 /* We should never get here if iter is NULL */
2786 if (WARN_ON_ONCE(!iter))
2789 event_call = fbuffer->trace_file->event_call;
2790 if (!event_call || !event_call->event.funcs ||
2791 !event_call->event.funcs->trace)
2794 file = fbuffer->trace_file;
2795 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2796 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2797 !filter_match_preds(file->filter, fbuffer->entry)))
2800 event = &fbuffer->trace_file->event_call->event;
2802 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2803 trace_seq_init(&iter->seq);
2804 iter->ent = fbuffer->entry;
2805 event_call->event.funcs->trace(iter, 0, event);
2806 trace_seq_putc(&iter->seq, 0);
2807 printk("%s", iter->seq.buffer);
2809 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2812 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2813 void *buffer, size_t *lenp,
2816 int save_tracepoint_printk;
2819 mutex_lock(&tracepoint_printk_mutex);
2820 save_tracepoint_printk = tracepoint_printk;
2822 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2825 * This will force exiting early, as tracepoint_printk
2826 * is always zero when tracepoint_print_iter is not allocated.
2828 if (!tracepoint_print_iter)
2829 tracepoint_printk = 0;
2831 if (save_tracepoint_printk == tracepoint_printk)
2834 if (tracepoint_printk)
2835 static_key_enable(&tracepoint_printk_key.key);
2837 static_key_disable(&tracepoint_printk_key.key);
2840 mutex_unlock(&tracepoint_printk_mutex);
2845 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2847 if (static_key_false(&tracepoint_printk_key.key))
2848 output_printk(fbuffer);
2850 if (static_branch_unlikely(&trace_event_exports_enabled))
2851 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2852 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2853 fbuffer->event, fbuffer->entry,
2854 fbuffer->trace_ctx, fbuffer->regs);
2856 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2861 * trace_buffer_unlock_commit_regs()
2862 * trace_event_buffer_commit()
2863 * trace_event_raw_event_xxx()
2865 # define STACK_SKIP 3
2867 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2868 struct trace_buffer *buffer,
2869 struct ring_buffer_event *event,
2870 unsigned int trace_ctx,
2871 struct pt_regs *regs)
2873 __buffer_unlock_commit(buffer, event);
2876 * If regs is not set, then skip the necessary functions.
2877 * Note, we can still get here via blktrace, wakeup tracer
2878 * and mmiotrace, but that's ok if they lose a function or
2879 * two. They are not that meaningful.
2881 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2882 ftrace_trace_userstack(tr, buffer, trace_ctx);
2886 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2889 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2890 struct ring_buffer_event *event)
2892 __buffer_unlock_commit(buffer, event);
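/*
 * Illustrative sketch, not part of the original file: the usual
 * reserve / fill / commit sequence built from the helpers in this file
 * looks roughly like:
 *
 *	event = trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		... fill in the entry payload ...
 *		trace_buffer_unlock_commit_nostack(buffer, event);
 *	}
 */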
2896 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2897 parent_ip, unsigned int trace_ctx)
2899 struct trace_event_call *call = &event_function;
2900 struct trace_buffer *buffer = tr->array_buffer.buffer;
2901 struct ring_buffer_event *event;
2902 struct ftrace_entry *entry;
2904 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2908 entry = ring_buffer_event_data(event);
2910 entry->parent_ip = parent_ip;
2912 if (!call_filter_check_discard(call, entry, buffer, event)) {
2913 if (static_branch_unlikely(&trace_function_exports_enabled))
2914 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2915 __buffer_unlock_commit(buffer, event);
2919 #ifdef CONFIG_STACKTRACE
2921 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2922 #define FTRACE_KSTACK_NESTING 4
2924 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2926 struct ftrace_stack {
2927 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2931 struct ftrace_stacks {
2932 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2935 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2936 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2938 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2939 unsigned int trace_ctx,
2940 int skip, struct pt_regs *regs)
2942 struct trace_event_call *call = &event_kernel_stack;
2943 struct ring_buffer_event *event;
2944 unsigned int size, nr_entries;
2945 struct ftrace_stack *fstack;
2946 struct stack_entry *entry;
2950 * Add one, for this function and the call to save_stack_trace().
2951 * If regs is set, then these functions will not be in the way.
2953 #ifndef CONFIG_UNWINDER_ORC
2958 preempt_disable_notrace();
2960 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2962 /* This should never happen. If it does, yell once and skip */
2963 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2967 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2968 * interrupt will either see the value pre increment or post
2969 * increment. If the interrupt happens pre increment it will have
2970 * restored the counter when it returns. We just need a barrier to
2971 * keep gcc from moving things around.
2975 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2976 size = ARRAY_SIZE(fstack->calls);
2979 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2982 nr_entries = stack_trace_save(fstack->calls, size, skip);
2985 size = nr_entries * sizeof(unsigned long);
2986 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2987 (sizeof(*entry) - sizeof(entry->caller)) + size,
2991 entry = ring_buffer_event_data(event);
2993 memcpy(&entry->caller, fstack->calls, size);
2994 entry->size = nr_entries;
2996 if (!call_filter_check_discard(call, entry, buffer, event))
2997 __buffer_unlock_commit(buffer, event);
3000 /* Again, don't let gcc optimize things here */
3002 __this_cpu_dec(ftrace_stack_reserve);
3003 preempt_enable_notrace();
3007 static inline void ftrace_trace_stack(struct trace_array *tr,
3008 struct trace_buffer *buffer,
3009 unsigned int trace_ctx,
3010 int skip, struct pt_regs *regs)
3012 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3015 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3018 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3021 struct trace_buffer *buffer = tr->array_buffer.buffer;
3023 if (rcu_is_watching()) {
3024 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3029 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3030 * but if the above rcu_is_watching() failed, then the NMI
3031 * triggered someplace critical, and rcu_irq_enter() should
3032 * not be called from NMI.
3034 if (unlikely(in_nmi()))
3037 rcu_irq_enter_irqson();
3038 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3039 rcu_irq_exit_irqson();
3043 * trace_dump_stack - record a stack back trace in the trace buffer
3044 * @skip: Number of functions to skip (helper handlers)
3046 void trace_dump_stack(int skip)
3048 if (tracing_disabled || tracing_selftest_running)
3051 #ifndef CONFIG_UNWINDER_ORC
3052 /* Skip 1 to skip this function. */
3055 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3056 tracing_gen_ctx(), skip, NULL);
3058 EXPORT_SYMBOL_GPL(trace_dump_stack);
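/*
 * Illustrative sketch, not part of the original file: any kernel code
 * that wants the current backtrace recorded into the top level ring
 * buffer (rather than printed to the console) can call:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero @skip drops that many additional helper frames from the
 * top of the recorded stack.
 */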
3060 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3061 static DEFINE_PER_CPU(int, user_stack_count);
3064 ftrace_trace_userstack(struct trace_array *tr,
3065 struct trace_buffer *buffer, unsigned int trace_ctx)
3067 struct trace_event_call *call = &event_user_stack;
3068 struct ring_buffer_event *event;
3069 struct userstack_entry *entry;
3071 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3075 * NMIs cannot handle page faults, even with fixups.
3076 * Saving the user stack can (and often does) fault.
3078 if (unlikely(in_nmi()))
3082 * prevent recursion, since the user stack tracing may
3083 * trigger other kernel events.
3086 if (__this_cpu_read(user_stack_count))
3089 __this_cpu_inc(user_stack_count);
3091 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3092 sizeof(*entry), trace_ctx);
3094 goto out_drop_count;
3095 entry = ring_buffer_event_data(event);
3097 entry->tgid = current->tgid;
3098 memset(&entry->caller, 0, sizeof(entry->caller));
3100 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3101 if (!call_filter_check_discard(call, entry, buffer, event))
3102 __buffer_unlock_commit(buffer, event);
3105 __this_cpu_dec(user_stack_count);
3109 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3110 static void ftrace_trace_userstack(struct trace_array *tr,
3111 struct trace_buffer *buffer,
3112 unsigned int trace_ctx)
3115 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3117 #endif /* CONFIG_STACKTRACE */
3119 /* created for use with alloc_percpu */
3120 struct trace_buffer_struct {
3122 char buffer[4][TRACE_BUF_SIZE];
3125 static struct trace_buffer_struct *trace_percpu_buffer;
3128 * This allows for lockless recording. If we're nested too deeply, then
3129 * this returns NULL.
3131 static char *get_trace_buf(void)
3133 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3135 if (!buffer || buffer->nesting >= 4)
3140 /* Interrupts must see nesting incremented before we use the buffer */
3142 return &buffer->buffer[buffer->nesting - 1][0];
3145 static void put_trace_buf(void)
3147 /* Don't let the decrement of nesting leak before this */
3149 this_cpu_dec(trace_percpu_buffer->nesting);
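/*
 * Illustrative sketch, not part of the original file: callers in this
 * file pair the two helpers above, relying on the nesting counter to
 * allow up to four levels of reentrancy per CPU:
 *
 *	char *tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *		put_trace_buf();
 *	}
 */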
3152 static int alloc_percpu_trace_buffer(void)
3154 struct trace_buffer_struct *buffers;
3156 if (trace_percpu_buffer)
3159 buffers = alloc_percpu(struct trace_buffer_struct);
3160 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3163 trace_percpu_buffer = buffers;
3167 static int buffers_allocated;
3169 void trace_printk_init_buffers(void)
3171 if (buffers_allocated)
3174 if (alloc_percpu_trace_buffer())
3177 /* trace_printk() is for debug use only. Don't use it in production. */
3180 pr_warn("**********************************************************\n");
3181 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3183 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3185 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3186 pr_warn("** unsafe for production use. **\n");
3188 pr_warn("** If you see this message and you are not debugging **\n");
3189 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3191 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3192 pr_warn("**********************************************************\n");
3194 /* Expand the buffers to set size */
3195 tracing_update_buffers();
3197 buffers_allocated = 1;
3200 * trace_printk_init_buffers() can be called by modules.
3201 * If that happens, then we need to start cmdline recording
3202 * directly here. If the global_trace.buffer is already
3203 * allocated here, then this was called by module code.
3205 if (global_trace.array_buffer.buffer)
3206 tracing_start_cmdline_record();
3208 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3210 void trace_printk_start_comm(void)
3212 /* Start tracing comms if trace printk is set */
3213 if (!buffers_allocated)
3215 tracing_start_cmdline_record();
3218 static void trace_printk_start_stop_comm(int enabled)
3220 if (!buffers_allocated)
3224 tracing_start_cmdline_record();
3226 tracing_stop_cmdline_record();
3230 * trace_vbprintk - write binary msg to tracing buffer
3231 * @ip: The address of the caller
3232 * @fmt: The string format to write to the buffer
3233 * @args: Arguments for @fmt
3235 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3237 struct trace_event_call *call = &event_bprint;
3238 struct ring_buffer_event *event;
3239 struct trace_buffer *buffer;
3240 struct trace_array *tr = &global_trace;
3241 struct bprint_entry *entry;
3242 unsigned int trace_ctx;
3246 if (unlikely(tracing_selftest_running || tracing_disabled))
3249 /* Don't pollute graph traces with trace_vprintk internals */
3250 pause_graph_tracing();
3252 trace_ctx = tracing_gen_ctx();
3253 preempt_disable_notrace();
3255 tbuffer = get_trace_buf();
3261 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3263 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3266 size = sizeof(*entry) + sizeof(u32) * len;
3267 buffer = tr->array_buffer.buffer;
3268 ring_buffer_nest_start(buffer);
3269 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3273 entry = ring_buffer_event_data(event);
3277 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3278 if (!call_filter_check_discard(call, entry, buffer, event)) {
3279 __buffer_unlock_commit(buffer, event);
3280 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3284 ring_buffer_nest_end(buffer);
3289 preempt_enable_notrace();
3290 unpause_graph_tracing();
3294 EXPORT_SYMBOL_GPL(trace_vbprintk);
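/*
 * Illustrative sketch, not part of the original file: trace_vbprintk()
 * is normally reached through the trace_printk() macro, which records
 * the format string at build time and passes the arguments here in
 * binary form:
 *
 *	trace_printk("processed %d packets in %llu ns\n", count, delta);
 *
 * Only the argument values land in the ring buffer; the format is
 * expanded when the trace is read back.
 */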
3298 __trace_array_vprintk(struct trace_buffer *buffer,
3299 unsigned long ip, const char *fmt, va_list args)
3301 struct trace_event_call *call = &event_print;
3302 struct ring_buffer_event *event;
3304 struct print_entry *entry;
3305 unsigned int trace_ctx;
3308 if (tracing_disabled || tracing_selftest_running)
3311 /* Don't pollute graph traces with trace_vprintk internals */
3312 pause_graph_tracing();
3314 trace_ctx = tracing_gen_ctx();
3315 preempt_disable_notrace();
3318 tbuffer = get_trace_buf();
3324 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3326 size = sizeof(*entry) + len + 1;
3327 ring_buffer_nest_start(buffer);
3328 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3332 entry = ring_buffer_event_data(event);
3335 memcpy(&entry->buf, tbuffer, len + 1);
3336 if (!call_filter_check_discard(call, entry, buffer, event)) {
3337 __buffer_unlock_commit(buffer, event);
3338 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3342 ring_buffer_nest_end(buffer);
3346 preempt_enable_notrace();
3347 unpause_graph_tracing();
3353 int trace_array_vprintk(struct trace_array *tr,
3354 unsigned long ip, const char *fmt, va_list args)
3356 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3360 * trace_array_printk - Print a message to a specific instance
3361 * @tr: The instance trace_array descriptor
3362 * @ip: The instruction pointer that this is called from.
3363 * @fmt: The format to print (printf format)
3365 * If a subsystem sets up its own instance, it has the right to
3366 * printk strings into its tracing instance buffer using this
3367 * function. Note, this function will not write into the top level
3368 * buffer (use trace_printk() for that), as the top level buffer
3369 * should only contain events that can be individually disabled.
3370 * trace_printk() is only used for debugging a kernel, and should
3371 * never be incorporated into normal use.
3373 * trace_array_printk() can be used, as it will not add noise to the
3374 * top level tracing buffer.
3376 * Note, trace_array_init_printk() must be called on @tr before this can be used.
3380 int trace_array_printk(struct trace_array *tr,
3381 unsigned long ip, const char *fmt, ...)
3389 /* This is only allowed for created instances */
3390 if (tr == &global_trace)
3393 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3397 ret = trace_array_vprintk(tr, ip, fmt, ap);
3401 EXPORT_SYMBOL_GPL(trace_array_printk);
3404 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3405 * @tr: The trace array to initialize the buffers for
3407 * As trace_array_printk() only writes into instances, they are OK to
3408 * have in the kernel (unlike trace_printk()). This needs to be called
3409 * before trace_array_printk() can be used on a trace_array.
3411 int trace_array_init_printk(struct trace_array *tr)
3416 /* This is only allowed for created instances */
3417 if (tr == &global_trace)
3420 return alloc_percpu_trace_buffer();
3422 EXPORT_SYMBOL_GPL(trace_array_init_printk);
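/*
 * Illustrative sketch, not part of the original file: a subsystem with
 * its own instance would pair the two exports above.  The instance
 * name "mydev" is a hypothetical example:
 *
 *	struct trace_array *tr = trace_array_get_by_name("mydev");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "probe done: %d\n", ret);
 */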
3425 int trace_array_printk_buf(struct trace_buffer *buffer,
3426 unsigned long ip, const char *fmt, ...)
3431 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3435 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3441 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3443 return trace_array_vprintk(&global_trace, ip, fmt, args);
3445 EXPORT_SYMBOL_GPL(trace_vprintk);
3447 static void trace_iterator_increment(struct trace_iterator *iter)
3449 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3453 ring_buffer_iter_advance(buf_iter);
3456 static struct trace_entry *
3457 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3458 unsigned long *lost_events)
3460 struct ring_buffer_event *event;
3461 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3464 event = ring_buffer_iter_peek(buf_iter, ts);
3466 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3467 (unsigned long)-1 : 0;
3469 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3474 iter->ent_size = ring_buffer_event_length(event);
3475 return ring_buffer_event_data(event);
3481 static struct trace_entry *
3482 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3483 unsigned long *missing_events, u64 *ent_ts)
3485 struct trace_buffer *buffer = iter->array_buffer->buffer;
3486 struct trace_entry *ent, *next = NULL;
3487 unsigned long lost_events = 0, next_lost = 0;
3488 int cpu_file = iter->cpu_file;
3489 u64 next_ts = 0, ts;
3495 * If we are in a per_cpu trace file, don't bother iterating over
3496 * all CPUs; peek at that CPU directly.
3498 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3499 if (ring_buffer_empty_cpu(buffer, cpu_file))
3501 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3503 *ent_cpu = cpu_file;
3508 for_each_tracing_cpu(cpu) {
3510 if (ring_buffer_empty_cpu(buffer, cpu))
3513 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3516 * Pick the entry with the smallest timestamp:
3518 if (ent && (!next || ts < next_ts)) {
3522 next_lost = lost_events;
3523 next_size = iter->ent_size;
3527 iter->ent_size = next_size;
3530 *ent_cpu = next_cpu;
3536 *missing_events = next_lost;
3541 #define STATIC_FMT_BUF_SIZE 128
3542 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3544 static char *trace_iter_expand_format(struct trace_iterator *iter)
3549 * iter->tr is NULL when used with tp_printk, which makes
3550 * this get called where it is not safe to call krealloc().
3552 if (!iter->tr || iter->fmt == static_fmt_buf)
3555 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3558 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3565 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3567 const char *p, *new_fmt;
3570 if (WARN_ON_ONCE(!fmt))
3573 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3577 new_fmt = q = iter->fmt;
3579 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3580 if (!trace_iter_expand_format(iter))
3583 q += iter->fmt - new_fmt;
3584 new_fmt = iter->fmt;
3589 /* Replace %p with %px */
3593 } else if (p[0] == 'p' && !isalnum(p[1])) {
3604 #define STATIC_TEMP_BUF_SIZE 128
3605 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3607 /* Find the next real entry, without updating the iterator itself */
3608 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3609 int *ent_cpu, u64 *ent_ts)
3611 /* __find_next_entry will reset ent_size */
3612 int ent_size = iter->ent_size;
3613 struct trace_entry *entry;
3616 * If called from ftrace_dump(), then the iter->temp buffer
3617 * will be the static_temp_buf and not created from kmalloc.
3618 * If the entry size is greater than the buffer, we cannot
3619 * save it. Just return NULL in that case. This is only
3620 * used to add markers when two consecutive events' time
3621 * stamps have a large delta. See trace_print_lat_context()
3623 if (iter->temp == static_temp_buf &&
3624 STATIC_TEMP_BUF_SIZE < ent_size)
3628 * The __find_next_entry() may call peek_next_entry(), which may
3629 * call ring_buffer_peek() that may make the contents of iter->ent
3630 * undefined. Need to copy iter->ent now.
3632 if (iter->ent && iter->ent != iter->temp) {
3633 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3634 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3636 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3641 iter->temp_size = iter->ent_size;
3643 memcpy(iter->temp, iter->ent, iter->ent_size);
3644 iter->ent = iter->temp;
3646 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3647 /* Put back the original ent_size */
3648 iter->ent_size = ent_size;
3653 /* Find the next real entry, and increment the iterator to the next entry */
3654 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3656 iter->ent = __find_next_entry(iter, &iter->cpu,
3657 &iter->lost_events, &iter->ts);
3660 trace_iterator_increment(iter);
3662 return iter->ent ? iter : NULL;
3665 static void trace_consume(struct trace_iterator *iter)
3667 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3668 &iter->lost_events);
3671 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3673 struct trace_iterator *iter = m->private;
3677 WARN_ON_ONCE(iter->leftover);
3681 /* can't go backwards */
3686 ent = trace_find_next_entry_inc(iter);
3690 while (ent && iter->idx < i)
3691 ent = trace_find_next_entry_inc(iter);
3698 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3700 struct ring_buffer_iter *buf_iter;
3701 unsigned long entries = 0;
3704 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3706 buf_iter = trace_buffer_iter(iter, cpu);
3710 ring_buffer_iter_reset(buf_iter);
3713 * With the max latency tracers, a reset may never have taken
3714 * place on a CPU. This is evident when the timestamp is
3715 * before the start of the buffer.
3717 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3718 if (ts >= iter->array_buffer->time_start)
3721 ring_buffer_iter_advance(buf_iter);
3724 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3728 * The current tracer is copied to avoid global locking all around.
3731 static void *s_start(struct seq_file *m, loff_t *pos)
3733 struct trace_iterator *iter = m->private;
3734 struct trace_array *tr = iter->tr;
3735 int cpu_file = iter->cpu_file;
3741 * copy the tracer to avoid using a global lock all around.
3742 * iter->trace is a copy of current_trace, the pointer to the
3743 * name may be used instead of a strcmp(), as iter->trace->name
3744 * will point to the same string as current_trace->name.
3746 mutex_lock(&trace_types_lock);
3747 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3748 *iter->trace = *tr->current_trace;
3749 mutex_unlock(&trace_types_lock);
3751 #ifdef CONFIG_TRACER_MAX_TRACE
3752 if (iter->snapshot && iter->trace->use_max_tr)
3753 return ERR_PTR(-EBUSY);
3756 if (!iter->snapshot)
3757 atomic_inc(&trace_record_taskinfo_disabled);
3759 if (*pos != iter->pos) {
3764 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3765 for_each_tracing_cpu(cpu)
3766 tracing_iter_reset(iter, cpu);
3768 tracing_iter_reset(iter, cpu_file);
3771 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3776 * If we overflowed the seq_file before, then we want
3777 * to just reuse the trace_seq buffer again.
3783 p = s_next(m, p, &l);
3787 trace_event_read_lock();
3788 trace_access_lock(cpu_file);
3792 static void s_stop(struct seq_file *m, void *p)
3794 struct trace_iterator *iter = m->private;
3796 #ifdef CONFIG_TRACER_MAX_TRACE
3797 if (iter->snapshot && iter->trace->use_max_tr)
3801 if (!iter->snapshot)
3802 atomic_dec(&trace_record_taskinfo_disabled);
3804 trace_access_unlock(iter->cpu_file);
3805 trace_event_read_unlock();
3809 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3810 unsigned long *entries, int cpu)
3812 unsigned long count;
3814 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3816 * If this buffer has skipped entries, then we hold all
3817 * entries for the trace and we need to ignore the
3818 * ones before the time stamp.
3820 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3821 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3822 /* total is the same as the entries */
3826 ring_buffer_overrun_cpu(buf->buffer, cpu);
3831 get_total_entries(struct array_buffer *buf,
3832 unsigned long *total, unsigned long *entries)
3840 for_each_tracing_cpu(cpu) {
3841 get_total_entries_cpu(buf, &t, &e, cpu);
3847 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3849 unsigned long total, entries;
3854 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3859 unsigned long trace_total_entries(struct trace_array *tr)
3861 unsigned long total, entries;
3866 get_total_entries(&tr->array_buffer, &total, &entries);
3871 static void print_lat_help_header(struct seq_file *m)
3873 seq_puts(m, "# _------=> CPU# \n"
3874 "# / _-----=> irqs-off \n"
3875 "# | / _----=> need-resched \n"
3876 "# || / _---=> hardirq/softirq \n"
3877 "# ||| / _--=> preempt-depth \n"
3879 "# cmd pid ||||| time | caller \n"
3880 "# \\ / ||||| \\ | / \n");
3883 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3885 unsigned long total;
3886 unsigned long entries;
3888 get_total_entries(buf, &total, &entries);
3889 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3890 entries, total, num_online_cpus());
3894 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3897 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3899 print_event_info(buf, m);
3901 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3902 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3905 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3908 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3909 const char *space = " ";
3910 int prec = tgid ? 12 : 2;
3912 print_event_info(buf, m);
3914 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3915 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3916 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3917 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3918 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3919 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3920 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3924 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3926 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3927 struct array_buffer *buf = iter->array_buffer;
3928 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3929 struct tracer *type = iter->trace;
3930 unsigned long entries;
3931 unsigned long total;
3932 const char *name = "preemption";
3936 get_total_entries(buf, &total, &entries);
3938 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3940 seq_puts(m, "# -----------------------------------"
3941 "---------------------------------\n");
3942 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3943 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3944 nsecs_to_usecs(data->saved_latency),
3948 #if defined(CONFIG_PREEMPT_NONE)
3950 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3952 #elif defined(CONFIG_PREEMPT)
3954 #elif defined(CONFIG_PREEMPT_RT)
3959 /* These are reserved for later use */
3962 seq_printf(m, " #P:%d)\n", num_online_cpus());
3966 seq_puts(m, "# -----------------\n");
3967 seq_printf(m, "# | task: %.16s-%d "
3968 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3969 data->comm, data->pid,
3970 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3971 data->policy, data->rt_priority);
3972 seq_puts(m, "# -----------------\n");
3974 if (data->critical_start) {
3975 seq_puts(m, "# => started at: ");
3976 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3977 trace_print_seq(m, &iter->seq);
3978 seq_puts(m, "\n# => ended at: ");
3979 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3980 trace_print_seq(m, &iter->seq);
3981 seq_puts(m, "\n#\n");
3987 static void test_cpu_buff_start(struct trace_iterator *iter)
3989 struct trace_seq *s = &iter->seq;
3990 struct trace_array *tr = iter->tr;
3992 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3995 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3998 if (cpumask_available(iter->started) &&
3999 cpumask_test_cpu(iter->cpu, iter->started))
4002 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4005 if (cpumask_available(iter->started))
4006 cpumask_set_cpu(iter->cpu, iter->started);
4008 /* Don't print started cpu buffer for the first entry of the trace */
4010 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4014 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4016 struct trace_array *tr = iter->tr;
4017 struct trace_seq *s = &iter->seq;
4018 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4019 struct trace_entry *entry;
4020 struct trace_event *event;
4024 test_cpu_buff_start(iter);
4026 event = ftrace_find_event(entry->type);
4028 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4029 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4030 trace_print_lat_context(iter);
4032 trace_print_context(iter);
4035 if (trace_seq_has_overflowed(s))
4036 return TRACE_TYPE_PARTIAL_LINE;
4039 return event->funcs->trace(iter, sym_flags, event);
4041 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4043 return trace_handle_return(s);
4046 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4048 struct trace_array *tr = iter->tr;
4049 struct trace_seq *s = &iter->seq;
4050 struct trace_entry *entry;
4051 struct trace_event *event;
4055 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4056 trace_seq_printf(s, "%d %d %llu ",
4057 entry->pid, iter->cpu, iter->ts);
4059 if (trace_seq_has_overflowed(s))
4060 return TRACE_TYPE_PARTIAL_LINE;
4062 event = ftrace_find_event(entry->type);
4064 return event->funcs->raw(iter, 0, event);
4066 trace_seq_printf(s, "%d ?\n", entry->type);
4068 return trace_handle_return(s);
4071 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4073 struct trace_array *tr = iter->tr;
4074 struct trace_seq *s = &iter->seq;
4075 unsigned char newline = '\n';
4076 struct trace_entry *entry;
4077 struct trace_event *event;
4081 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4082 SEQ_PUT_HEX_FIELD(s, entry->pid);
4083 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4084 SEQ_PUT_HEX_FIELD(s, iter->ts);
4085 if (trace_seq_has_overflowed(s))
4086 return TRACE_TYPE_PARTIAL_LINE;
4089 event = ftrace_find_event(entry->type);
4091 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4092 if (ret != TRACE_TYPE_HANDLED)
4096 SEQ_PUT_FIELD(s, newline);
4098 return trace_handle_return(s);
4101 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4103 struct trace_array *tr = iter->tr;
4104 struct trace_seq *s = &iter->seq;
4105 struct trace_entry *entry;
4106 struct trace_event *event;
4110 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4111 SEQ_PUT_FIELD(s, entry->pid);
4112 SEQ_PUT_FIELD(s, iter->cpu);
4113 SEQ_PUT_FIELD(s, iter->ts);
4114 if (trace_seq_has_overflowed(s))
4115 return TRACE_TYPE_PARTIAL_LINE;
4118 event = ftrace_find_event(entry->type);
4119 return event ? event->funcs->binary(iter, 0, event) :
4123 int trace_empty(struct trace_iterator *iter)
4125 struct ring_buffer_iter *buf_iter;
4128 /* If we are looking at one CPU buffer, only check that one */
4129 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4130 cpu = iter->cpu_file;
4131 buf_iter = trace_buffer_iter(iter, cpu);
4133 if (!ring_buffer_iter_empty(buf_iter))
4136 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4142 for_each_tracing_cpu(cpu) {
4143 buf_iter = trace_buffer_iter(iter, cpu);
4145 if (!ring_buffer_iter_empty(buf_iter))
4148 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4156 /* Called with trace_event_read_lock() held. */
4157 enum print_line_t print_trace_line(struct trace_iterator *iter)
4159 struct trace_array *tr = iter->tr;
4160 unsigned long trace_flags = tr->trace_flags;
4161 enum print_line_t ret;
4163 if (iter->lost_events) {
4164 if (iter->lost_events == (unsigned long)-1)
4165 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4168 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4169 iter->cpu, iter->lost_events);
4170 if (trace_seq_has_overflowed(&iter->seq))
4171 return TRACE_TYPE_PARTIAL_LINE;
4174 if (iter->trace && iter->trace->print_line) {
4175 ret = iter->trace->print_line(iter);
4176 if (ret != TRACE_TYPE_UNHANDLED)
4180 if (iter->ent->type == TRACE_BPUTS &&
4181 trace_flags & TRACE_ITER_PRINTK &&
4182 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4183 return trace_print_bputs_msg_only(iter);
4185 if (iter->ent->type == TRACE_BPRINT &&
4186 trace_flags & TRACE_ITER_PRINTK &&
4187 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4188 return trace_print_bprintk_msg_only(iter);
4190 if (iter->ent->type == TRACE_PRINT &&
4191 trace_flags & TRACE_ITER_PRINTK &&
4192 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4193 return trace_print_printk_msg_only(iter);
4195 if (trace_flags & TRACE_ITER_BIN)
4196 return print_bin_fmt(iter);
4198 if (trace_flags & TRACE_ITER_HEX)
4199 return print_hex_fmt(iter);
4201 if (trace_flags & TRACE_ITER_RAW)
4202 return print_raw_fmt(iter);
4204 return print_trace_fmt(iter);
4207 void trace_latency_header(struct seq_file *m)
4209 struct trace_iterator *iter = m->private;
4210 struct trace_array *tr = iter->tr;
4212 /* print nothing if the buffers are empty */
4213 if (trace_empty(iter))
4216 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4217 print_trace_header(m, iter);
4219 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4220 print_lat_help_header(m);
4223 void trace_default_header(struct seq_file *m)
4225 struct trace_iterator *iter = m->private;
4226 struct trace_array *tr = iter->tr;
4227 unsigned long trace_flags = tr->trace_flags;
4229 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4232 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4233 /* print nothing if the buffers are empty */
4234 if (trace_empty(iter))
4236 print_trace_header(m, iter);
4237 if (!(trace_flags & TRACE_ITER_VERBOSE))
4238 print_lat_help_header(m);
4240 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4241 if (trace_flags & TRACE_ITER_IRQ_INFO)
4242 print_func_help_header_irq(iter->array_buffer,
4245 print_func_help_header(iter->array_buffer, m,
4251 static void test_ftrace_alive(struct seq_file *m)
4253 if (!ftrace_is_dead())
4255 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4256 "# MAY BE MISSING FUNCTION EVENTS\n");
4259 #ifdef CONFIG_TRACER_MAX_TRACE
4260 static void show_snapshot_main_help(struct seq_file *m)
4262 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4263 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4264 "# Takes a snapshot of the main buffer.\n"
4265 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4266 "# (Doesn't have to be '2' works with any number that\n"
4267 "# is not a '0' or '1')\n");
4270 static void show_snapshot_percpu_help(struct seq_file *m)
4272 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4273 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4274 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4275 "# Takes a snapshot of the main buffer for this cpu.\n");
4277 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4278 "# Must use main snapshot file to allocate.\n");
4280 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4281 "# (Doesn't have to be '2' works with any number that\n"
4282 "# is not a '0' or '1')\n");
4285 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4287 if (iter->tr->allocated_snapshot)
4288 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4290 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4292 seq_puts(m, "# Snapshot commands:\n");
4293 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4294 show_snapshot_main_help(m);
4296 show_snapshot_percpu_help(m);
4299 /* Should never be called */
4300 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4303 static int s_show(struct seq_file *m, void *v)
4305 struct trace_iterator *iter = v;
4308 if (iter->ent == NULL) {
4310 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4312 test_ftrace_alive(m);
4314 if (iter->snapshot && trace_empty(iter))
4315 print_snapshot_help(m, iter);
4316 else if (iter->trace && iter->trace->print_header)
4317 iter->trace->print_header(m);
4319 trace_default_header(m);
4321 } else if (iter->leftover) {
4323 * If we filled the seq_file buffer earlier, we
4324 * want to just show it now.
4326 ret = trace_print_seq(m, &iter->seq);
4328 /* ret should this time be zero, but you never know */
4329 iter->leftover = ret;
4332 print_trace_line(iter);
4333 ret = trace_print_seq(m, &iter->seq);
4335 * If we overflow the seq_file buffer, then it will
4336 * ask us for this data again at start up.
4338 * ret is 0 if seq_file write succeeded.
4341 iter->leftover = ret;
4348 * Should be used after trace_array_get(); trace_types_lock
4349 * ensures that i_cdev was already initialized.
4351 static inline int tracing_get_cpu(struct inode *inode)
4353 if (inode->i_cdev) /* See trace_create_cpu_file() */
4354 return (long)inode->i_cdev - 1;
4355 return RING_BUFFER_ALL_CPUS;
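/*
 * Illustrative sketch, not part of the original file: the per_cpu
 * files store cpu + 1 in i_cdev when they are created (see
 * trace_create_cpu_file()), so the decode above works out to:
 *
 *	i_cdev == NULL       ->  RING_BUFFER_ALL_CPUS
 *	i_cdev == (void *)3  ->  cpu 2
 */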
4358 static const struct seq_operations tracer_seq_ops = {
4365 static struct trace_iterator *
4366 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4368 struct trace_array *tr = inode->i_private;
4369 struct trace_iterator *iter;
4372 if (tracing_disabled)
4373 return ERR_PTR(-ENODEV);
4375 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4377 return ERR_PTR(-ENOMEM);
4379 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4381 if (!iter->buffer_iter)
4385 * trace_find_next_entry() may need to save off iter->ent.
4386 * It will place it into the iter->temp buffer. As most
4387 * events are less than 128 bytes, allocate a buffer of that size.
4388 * If one is greater, then trace_find_next_entry() will
4389 * allocate a new buffer to adjust for the bigger iter->ent.
4390 * It's not critical if it fails to get allocated here.
4392 iter->temp = kmalloc(128, GFP_KERNEL);
4394 iter->temp_size = 128;
4397 * trace_event_printf() may need to modify the given format
4398 * string to replace %p with %px so that it shows the real address
4399 * instead of a hash value. However, that is only needed for event
4400 * tracing; other tracers may not need it. Defer the allocation
4401 * until it is needed.
4407 * We make a copy of the current tracer to avoid concurrent
4408 * changes on it while we are reading.
4410 mutex_lock(&trace_types_lock);
4411 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4415 *iter->trace = *tr->current_trace;
4417 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4422 #ifdef CONFIG_TRACER_MAX_TRACE
4423 /* Currently only the top directory has a snapshot */
4424 if (tr->current_trace->print_max || snapshot)
4425 iter->array_buffer = &tr->max_buffer;
4428 iter->array_buffer = &tr->array_buffer;
4429 iter->snapshot = snapshot;
4431 iter->cpu_file = tracing_get_cpu(inode);
4432 mutex_init(&iter->mutex);
4434 /* Notify the tracer early; before we stop tracing. */
4435 if (iter->trace->open)
4436 iter->trace->open(iter);
4438 /* Annotate start of buffers if we had overruns */
4439 if (ring_buffer_overruns(iter->array_buffer->buffer))
4440 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4442 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4443 if (trace_clocks[tr->clock_id].in_ns)
4444 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4447 * If pause-on-trace is enabled, then stop the trace while
4448 * dumping, unless this is the "snapshot" file
4450 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4451 tracing_stop_tr(tr);
4453 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4454 for_each_tracing_cpu(cpu) {
4455 iter->buffer_iter[cpu] =
4456 ring_buffer_read_prepare(iter->array_buffer->buffer,
4459 ring_buffer_read_prepare_sync();
4460 for_each_tracing_cpu(cpu) {
4461 ring_buffer_read_start(iter->buffer_iter[cpu]);
4462 tracing_iter_reset(iter, cpu);
4465 cpu = iter->cpu_file;
4466 iter->buffer_iter[cpu] =
4467 ring_buffer_read_prepare(iter->array_buffer->buffer,
4469 ring_buffer_read_prepare_sync();
4470 ring_buffer_read_start(iter->buffer_iter[cpu]);
4471 tracing_iter_reset(iter, cpu);
4474 mutex_unlock(&trace_types_lock);
4479 mutex_unlock(&trace_types_lock);
4482 kfree(iter->buffer_iter);
4484 seq_release_private(inode, file);
4485 return ERR_PTR(-ENOMEM);
4488 int tracing_open_generic(struct inode *inode, struct file *filp)
4492 ret = tracing_check_open_get_tr(NULL);
4496 filp->private_data = inode->i_private;
4500 bool tracing_is_disabled(void)
4502 return (tracing_disabled) ? true: false;
4506 * Open and update trace_array ref count.
4507 * Must have the current trace_array passed to it.
4509 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4511 struct trace_array *tr = inode->i_private;
4514 ret = tracing_check_open_get_tr(tr);
4518 filp->private_data = inode->i_private;
4523 static int tracing_release(struct inode *inode, struct file *file)
4525 struct trace_array *tr = inode->i_private;
4526 struct seq_file *m = file->private_data;
4527 struct trace_iterator *iter;
4530 if (!(file->f_mode & FMODE_READ)) {
4531 trace_array_put(tr);
4535 /* Writes do not use seq_file */
4537 mutex_lock(&trace_types_lock);
4539 for_each_tracing_cpu(cpu) {
4540 if (iter->buffer_iter[cpu])
4541 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4544 if (iter->trace && iter->trace->close)
4545 iter->trace->close(iter);
4547 if (!iter->snapshot && tr->stop_count)
4548 /* reenable tracing if it was previously enabled */
4549 tracing_start_tr(tr);
4551 __trace_array_put(tr);
4553 mutex_unlock(&trace_types_lock);
4555 mutex_destroy(&iter->mutex);
4556 free_cpumask_var(iter->started);
4560 kfree(iter->buffer_iter);
4561 seq_release_private(inode, file);
4566 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4568 struct trace_array *tr = inode->i_private;
4570 trace_array_put(tr);
4574 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4576 struct trace_array *tr = inode->i_private;
4578 trace_array_put(tr);
4580 return single_release(inode, file);
4583 static int tracing_open(struct inode *inode, struct file *file)
4585 struct trace_array *tr = inode->i_private;
4586 struct trace_iterator *iter;
4589 ret = tracing_check_open_get_tr(tr);
4593 /* If this file was open for write, then erase contents */
4594 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4595 int cpu = tracing_get_cpu(inode);
4596 struct array_buffer *trace_buf = &tr->array_buffer;
4598 #ifdef CONFIG_TRACER_MAX_TRACE
4599 if (tr->current_trace->print_max)
4600 trace_buf = &tr->max_buffer;
4603 if (cpu == RING_BUFFER_ALL_CPUS)
4604 tracing_reset_online_cpus(trace_buf);
4606 tracing_reset_cpu(trace_buf, cpu);
4609 if (file->f_mode & FMODE_READ) {
4610 iter = __tracing_open(inode, file, false);
4612 ret = PTR_ERR(iter);
4613 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4614 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4618 trace_array_put(tr);
4624 * Some tracers are not suitable for instance buffers.
4625 * A tracer is always available for the global array (toplevel)
4626 * or if it explicitly states that it is.
4629 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4631 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4634 /* Find the next tracer that this trace array may use */
4635 static struct tracer *
4636 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4638 while (t && !trace_ok_for_array(t, tr))
4645 t_next(struct seq_file *m, void *v, loff_t *pos)
4647 struct trace_array *tr = m->private;
4648 struct tracer *t = v;
4653 t = get_tracer_for_array(tr, t->next);
4658 static void *t_start(struct seq_file *m, loff_t *pos)
4660 struct trace_array *tr = m->private;
4664 mutex_lock(&trace_types_lock);
4666 t = get_tracer_for_array(tr, trace_types);
4667 for (; t && l < *pos; t = t_next(m, t, &l))
4673 static void t_stop(struct seq_file *m, void *p)
4675 mutex_unlock(&trace_types_lock);
4678 static int t_show(struct seq_file *m, void *v)
4680 struct tracer *t = v;
4685 seq_puts(m, t->name);
4694 static const struct seq_operations show_traces_seq_ops = {
4701 static int show_traces_open(struct inode *inode, struct file *file)
4703 struct trace_array *tr = inode->i_private;
4707 ret = tracing_check_open_get_tr(tr);
4711 ret = seq_open(file, &show_traces_seq_ops);
4713 trace_array_put(tr);
4717 m = file->private_data;
4723 static int show_traces_release(struct inode *inode, struct file *file)
4725 struct trace_array *tr = inode->i_private;
4727 trace_array_put(tr);
4728 return seq_release(inode, file);
4732 tracing_write_stub(struct file *filp, const char __user *ubuf,
4733 size_t count, loff_t *ppos)
4738 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4742 if (file->f_mode & FMODE_READ)
4743 ret = seq_lseek(file, offset, whence);
4745 file->f_pos = ret = 0;
4750 static const struct file_operations tracing_fops = {
4751 .open = tracing_open,
4753 .write = tracing_write_stub,
4754 .llseek = tracing_lseek,
4755 .release = tracing_release,
4758 static const struct file_operations show_traces_fops = {
4759 .open = show_traces_open,
4761 .llseek = seq_lseek,
4762 .release = show_traces_release,
4766 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4767 size_t count, loff_t *ppos)
4769 struct trace_array *tr = file_inode(filp)->i_private;
4773 len = snprintf(NULL, 0, "%*pb\n",
4774 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4775 mask_str = kmalloc(len, GFP_KERNEL);
4779 len = snprintf(mask_str, len, "%*pb\n",
4780 cpumask_pr_args(tr->tracing_cpumask));
4785 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4793 int tracing_set_cpumask(struct trace_array *tr,
4794 cpumask_var_t tracing_cpumask_new)
4801 local_irq_disable();
4802 arch_spin_lock(&tr->max_lock);
4803 for_each_tracing_cpu(cpu) {
4805 * Increase/decrease the disabled counter if we are
4806 * about to flip a bit in the cpumask:
4808 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4809 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4810 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4811 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4813 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4814 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4815 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4816 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4819 arch_spin_unlock(&tr->max_lock);
4822 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
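/*
 * Illustrative sketch, not part of the original file: the usual way to
 * exercise this path is through the tracing_cpumask file, e.g. to
 * limit tracing to CPUs 0 and 1:
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * The write handler below parses the user mask and hands it to
 * tracing_set_cpumask().
 */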
4828 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4829 size_t count, loff_t *ppos)
4831 struct trace_array *tr = file_inode(filp)->i_private;
4832 cpumask_var_t tracing_cpumask_new;
4835 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4838 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4842 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4846 free_cpumask_var(tracing_cpumask_new);
4851 free_cpumask_var(tracing_cpumask_new);
4856 static const struct file_operations tracing_cpumask_fops = {
4857 .open = tracing_open_generic_tr,
4858 .read = tracing_cpumask_read,
4859 .write = tracing_cpumask_write,
4860 .release = tracing_release_generic_tr,
4861 .llseek = generic_file_llseek,
4864 static int tracing_trace_options_show(struct seq_file *m, void *v)
4866 struct tracer_opt *trace_opts;
4867 struct trace_array *tr = m->private;
4871 mutex_lock(&trace_types_lock);
4872 tracer_flags = tr->current_trace->flags->val;
4873 trace_opts = tr->current_trace->flags->opts;
4875 for (i = 0; trace_options[i]; i++) {
4876 if (tr->trace_flags & (1 << i))
4877 seq_printf(m, "%s\n", trace_options[i]);
4879 seq_printf(m, "no%s\n", trace_options[i]);
4882 for (i = 0; trace_opts[i].name; i++) {
4883 if (tracer_flags & trace_opts[i].bit)
4884 seq_printf(m, "%s\n", trace_opts[i].name);
4886 seq_printf(m, "no%s\n", trace_opts[i].name);
4888 mutex_unlock(&trace_types_lock);
4893 static int __set_tracer_option(struct trace_array *tr,
4894 struct tracer_flags *tracer_flags,
4895 struct tracer_opt *opts, int neg)
4897 struct tracer *trace = tracer_flags->trace;
4900 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4905 tracer_flags->val &= ~opts->bit;
4907 tracer_flags->val |= opts->bit;
4911 /* Try to assign a tracer specific option */
4912 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4914 struct tracer *trace = tr->current_trace;
4915 struct tracer_flags *tracer_flags = trace->flags;
4916 struct tracer_opt *opts = NULL;
4919 for (i = 0; tracer_flags->opts[i].name; i++) {
4920 opts = &tracer_flags->opts[i];
4922 if (strcmp(cmp, opts->name) == 0)
4923 return __set_tracer_option(tr, trace->flags, opts, neg);
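/*
 * Illustrative sketch, not part of the original file: a tracer that wants
 * its own options handled by set_tracer_option() above would typically
 * declare a tracer_opt array (terminated by a NULL name, which is what the
 * loop above stops on) plus a tracer_flags struct, and hook them into its
 * struct tracer via .flags and .set_flag. All names below are hypothetical.
 *
 *	#define MY_TRACER_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(verbose, MY_TRACER_OPT_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,
 *		.opts = my_tracer_opts,
 *	};
 *
 *	static int my_set_flag(struct trace_array *tr, u32 old_flags,
 *			       u32 bit, int set)
 *	{
 *		// react to "echo [no]verbose > trace_options"
 *		return 0;
 *	}
 */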
4929 /* Some tracers require overwrite to stay enabled */
4930 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4932 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4938 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4940 if ((mask == TRACE_ITER_RECORD_TGID) ||
4941 (mask == TRACE_ITER_RECORD_CMD))
4942 lockdep_assert_held(&event_mutex);
4944 /* do nothing if flag is already set */
4945 if (!!(tr->trace_flags & mask) == !!enabled)
4948 /* Give the tracer a chance to approve the change */
4949 if (tr->current_trace->flag_changed)
4950 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4954 tr->trace_flags |= mask;
4956 tr->trace_flags &= ~mask;
4958 if (mask == TRACE_ITER_RECORD_CMD)
4959 trace_event_enable_cmd_record(enabled);
4961 if (mask == TRACE_ITER_RECORD_TGID) {
4963 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4967 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4971 trace_event_enable_tgid_record(enabled);
4974 if (mask == TRACE_ITER_EVENT_FORK)
4975 trace_event_follow_fork(tr, enabled);
4977 if (mask == TRACE_ITER_FUNC_FORK)
4978 ftrace_pid_follow_fork(tr, enabled);
4980 if (mask == TRACE_ITER_OVERWRITE) {
4981 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4982 #ifdef CONFIG_TRACER_MAX_TRACE
4983 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4987 if (mask == TRACE_ITER_PRINTK) {
4988 trace_printk_start_stop_comm(enabled);
4989 trace_printk_control(enabled);
4995 int trace_set_options(struct trace_array *tr, char *option)
5000 size_t orig_len = strlen(option);
5003 cmp = strstrip(option);
5005 len = str_has_prefix(cmp, "no");
5011 mutex_lock(&event_mutex);
5012 mutex_lock(&trace_types_lock);
5014 ret = match_string(trace_options, -1, cmp);
5015 /* If no option could be set, test the specific tracer options */
5017 ret = set_tracer_option(tr, cmp, neg);
5019 ret = set_tracer_flag(tr, 1 << ret, !neg);
5021 mutex_unlock(&trace_types_lock);
5022 mutex_unlock(&event_mutex);
5025 * If the first trailing whitespace is replaced with '\0' by strstrip,
5026 * turn it back into a space.
5028 if (orig_len > strlen(option))
5029 option[strlen(option)] = ' ';
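/*
 * Illustrative sketch, not part of the original file: trace_set_options()
 * takes the same strings that can be written to the trace_options file or
 * passed via the trace_options= boot parameter, with a "no" prefix clearing
 * the flag. The option names below are standard trace flags:
 *
 *	trace_set_options(&global_trace, "sym-offset");     // set a core flag
 *	trace_set_options(&global_trace, "noprint-parent"); // clear a core flag
 *	trace_set_options(&global_trace, "funcgraph-proc"); // tracer-specific,
 *							     // handled by set_tracer_option()
 */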
5034 static void __init apply_trace_boot_options(void)
5036 char *buf = trace_boot_options_buf;
5040 option = strsep(&buf, ",");
5046 trace_set_options(&global_trace, option);
5048 /* Put back the comma to allow this to be called again */
5055 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5056 size_t cnt, loff_t *ppos)
5058 struct seq_file *m = filp->private_data;
5059 struct trace_array *tr = m->private;
5063 if (cnt >= sizeof(buf))
5066 if (copy_from_user(buf, ubuf, cnt))
5071 ret = trace_set_options(tr, buf);
5080 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5082 struct trace_array *tr = inode->i_private;
5085 ret = tracing_check_open_get_tr(tr);
5089 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5091 trace_array_put(tr);
5096 static const struct file_operations tracing_iter_fops = {
5097 .open = tracing_trace_options_open,
5099 .llseek = seq_lseek,
5100 .release = tracing_single_release_tr,
5101 .write = tracing_trace_options_write,
5104 static const char readme_msg[] =
5105 "tracing mini-HOWTO:\n\n"
5106 "# echo 0 > tracing_on : quick way to disable tracing\n"
5107 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5108 " Important files:\n"
5109 " trace\t\t\t- The static contents of the buffer\n"
5110 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5111 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5112 " current_tracer\t- function and latency tracers\n"
5113 " available_tracers\t- list of configured tracers for current_tracer\n"
5114 " error_log\t- error log for failed commands (that support it)\n"
5115 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5116 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5117 " trace_clock\t\t-change the clock used to order events\n"
5118 " local: Per cpu clock but may not be synced across CPUs\n"
5119 " global: Synced across CPUs but slows tracing down.\n"
5120 " counter: Not a clock, but just an increment\n"
5121 " uptime: Jiffy counter from time of boot\n"
5122 " perf: Same clock that perf events use\n"
5123 #ifdef CONFIG_X86_64
5124 " x86-tsc: TSC cycle counter\n"
5126 "\n timestamp_mode\t-view the mode used to timestamp events\n"
5127 " delta: Delta difference against a buffer-wide timestamp\n"
5128 " absolute: Absolute (standalone) timestamp\n"
5129 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5130 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5131 " tracing_cpumask\t- Limit which CPUs to trace\n"
5132 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5133 "\t\t\t Remove sub-buffer with rmdir\n"
5134 " trace_options\t\t- Set format or modify how tracing happens\n"
5135 "\t\t\t Disable an option by prefixing 'no' to the\n"
5136 "\t\t\t option name\n"
5137 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5138 #ifdef CONFIG_DYNAMIC_FTRACE
5139 "\n available_filter_functions - list of functions that can be filtered on\n"
5140 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5141 "\t\t\t functions\n"
5142 "\t accepts: func_full_name or glob-matching-pattern\n"
5143 "\t modules: Can select a group via module\n"
5144 "\t Format: :mod:<module-name>\n"
5145 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5146 "\t triggers: a command to perform when function is hit\n"
5147 "\t Format: <function>:<trigger>[:count]\n"
5148 "\t trigger: traceon, traceoff\n"
5149 "\t\t enable_event:<system>:<event>\n"
5150 "\t\t disable_event:<system>:<event>\n"
5151 #ifdef CONFIG_STACKTRACE
5154 #ifdef CONFIG_TRACER_SNAPSHOT
5159 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5160 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5161 "\t The first one will disable tracing every time do_fault is hit\n"
5162 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5163 "\t The first time do trap is hit and it disables tracing, the\n"
5164 "\t counter will decrement to 2. If tracing is already disabled,\n"
5165 "\t the counter will not decrement. It only decrements when the\n"
5166 "\t trigger did work\n"
5167 "\t To remove trigger without count:\n"
5168 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5169 "\t To remove trigger with a count:\n"
5170 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5171 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5172 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5173 "\t modules: Can select a group via module command :mod:\n"
5174 "\t Does not accept triggers\n"
5175 #endif /* CONFIG_DYNAMIC_FTRACE */
5176 #ifdef CONFIG_FUNCTION_TRACER
5177 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5179 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5182 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5183 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5184 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5185 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5187 #ifdef CONFIG_TRACER_SNAPSHOT
5188 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5189 "\t\t\t snapshot buffer. Read the contents for more\n"
5190 "\t\t\t information\n"
5192 #ifdef CONFIG_STACK_TRACER
5193 " stack_trace\t\t- Shows the max stack trace when active\n"
5194 " stack_max_size\t- Shows current max stack size that was traced\n"
5195 "\t\t\t Write into this file to reset the max size (trigger a\n"
5196 "\t\t\t new trace)\n"
5197 #ifdef CONFIG_DYNAMIC_FTRACE
5198 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5201 #endif /* CONFIG_STACK_TRACER */
5202 #ifdef CONFIG_DYNAMIC_EVENTS
5203 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5204 "\t\t\t Write into this file to define/undefine new trace events.\n"
5206 #ifdef CONFIG_KPROBE_EVENTS
5207 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5208 "\t\t\t Write into this file to define/undefine new trace events.\n"
5210 #ifdef CONFIG_UPROBE_EVENTS
5211 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5212 "\t\t\t Write into this file to define/undefine new trace events.\n"
5214 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5215 "\t accepts: event-definitions (one definition per line)\n"
5216 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5217 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5218 #ifdef CONFIG_HIST_TRIGGERS
5219 "\t s:[synthetic/]<event> <field> [<field>]\n"
5221 "\t -:[<group>/]<event>\n"
5222 #ifdef CONFIG_KPROBE_EVENTS
5223 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5224 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5226 #ifdef CONFIG_UPROBE_EVENTS
5227 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5229 "\t args: <name>=fetcharg[:type]\n"
5230 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5231 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5232 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5234 "\t $stack<index>, $stack, $retval, $comm,\n"
5236 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5237 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5238 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5239 "\t <type>\\[<array-size>\\]\n"
5240 #ifdef CONFIG_HIST_TRIGGERS
5241 "\t field: <stype> <name>;\n"
5242 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5243 "\t [unsigned] char/int/long\n"
5246 " events/\t\t- Directory containing all trace event subsystems:\n"
5247 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5248 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5249 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5251 " filter\t\t- If set, only events passing filter are traced\n"
5252 " events/<system>/<event>/\t- Directory containing control files for\n"
5254 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5255 " filter\t\t- If set, only events passing filter are traced\n"
5256 " trigger\t\t- If set, a command to perform when event is hit\n"
5257 "\t Format: <trigger>[:count][if <filter>]\n"
5258 "\t trigger: traceon, traceoff\n"
5259 "\t enable_event:<system>:<event>\n"
5260 "\t disable_event:<system>:<event>\n"
5261 #ifdef CONFIG_HIST_TRIGGERS
5262 "\t enable_hist:<system>:<event>\n"
5263 "\t disable_hist:<system>:<event>\n"
5265 #ifdef CONFIG_STACKTRACE
5268 #ifdef CONFIG_TRACER_SNAPSHOT
5271 #ifdef CONFIG_HIST_TRIGGERS
5272 "\t\t hist (see below)\n"
5274 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5275 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5276 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5277 "\t events/block/block_unplug/trigger\n"
5278 "\t The first disables tracing every time block_unplug is hit.\n"
5279 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5280 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5281 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5282 "\t Like function triggers, the counter is only decremented if it\n"
5283 "\t enabled or disabled tracing.\n"
5284 "\t To remove a trigger without a count:\n"
5285 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5286 "\t To remove a trigger with a count:\n"
5287 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5288 "\t Filters can be ignored when removing a trigger.\n"
5289 #ifdef CONFIG_HIST_TRIGGERS
5290 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5291 "\t Format: hist:keys=<field1[,field2,...]>\n"
5292 "\t [:values=<field1[,field2,...]>]\n"
5293 "\t [:sort=<field1[,field2,...]>]\n"
5294 "\t [:size=#entries]\n"
5295 "\t [:pause][:continue][:clear]\n"
5296 "\t [:name=histname1]\n"
5297 "\t [:<handler>.<action>]\n"
5298 "\t [if <filter>]\n\n"
5299 "\t When a matching event is hit, an entry is added to a hash\n"
5300 "\t table using the key(s) and value(s) named, and the value of a\n"
5301 "\t sum called 'hitcount' is incremented. Keys and values\n"
5302 "\t correspond to fields in the event's format description. Keys\n"
5303 "\t can be any field, or the special string 'stacktrace'.\n"
5304 "\t Compound keys consisting of up to two fields can be specified\n"
5305 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5306 "\t fields. Sort keys consisting of up to two fields can be\n"
5307 "\t specified using the 'sort' keyword. The sort direction can\n"
5308 "\t be modified by appending '.descending' or '.ascending' to a\n"
5309 "\t sort field. The 'size' parameter can be used to specify more\n"
5310 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5311 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5312 "\t its histogram data will be shared with other triggers of the\n"
5313 "\t same name, and trigger hits will update this common data.\n\n"
5314 "\t Reading the 'hist' file for the event will dump the hash\n"
5315 "\t table in its entirety to stdout. If there are multiple hist\n"
5316 "\t triggers attached to an event, there will be a table for each\n"
5317 "\t trigger in the output. The table displayed for a named\n"
5318 "\t trigger will be the same as any other instance having the\n"
5319 "\t same name. The default format used to display a given field\n"
5320 "\t can be modified by appending any of the following modifiers\n"
5321 "\t to the field name, as applicable:\n\n"
5322 "\t .hex display a number as a hex value\n"
5323 "\t .sym display an address as a symbol\n"
5324 "\t .sym-offset display an address as a symbol and offset\n"
5325 "\t .execname display a common_pid as a program name\n"
5326 "\t .syscall display a syscall id as a syscall name\n"
5327 "\t .log2 display log2 value rather than raw number\n"
5328 "\t .usecs display a common_timestamp in microseconds\n\n"
5329 "\t The 'pause' parameter can be used to pause an existing hist\n"
5330 "\t trigger or to start a hist trigger but not log any events\n"
5331 "\t until told to do so. 'continue' can be used to start or\n"
5332 "\t restart a paused hist trigger.\n\n"
5333 "\t The 'clear' parameter will clear the contents of a running\n"
5334 "\t hist trigger and leave its current paused/active state\n"
5336 "\t The enable_hist and disable_hist triggers can be used to\n"
5337 "\t have one event conditionally start and stop another event's\n"
5338 "\t already-attached hist trigger. The syntax is analogous to\n"
5339 "\t the enable_event and disable_event triggers.\n\n"
5340 "\t Hist trigger handlers and actions are executed whenever a\n"
5341 "\t a histogram entry is added or updated. They take the form:\n\n"
5342 "\t <handler>.<action>\n\n"
5343 "\t The available handlers are:\n\n"
5344 "\t onmatch(matching.event) - invoke on addition or update\n"
5345 "\t onmax(var) - invoke if var exceeds current max\n"
5346 "\t onchange(var) - invoke action if var changes\n\n"
5347 "\t The available actions are:\n\n"
5348 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5349 "\t save(field,...) - save current event fields\n"
5350 #ifdef CONFIG_TRACER_SNAPSHOT
5351 "\t snapshot() - snapshot the trace buffer\n\n"
5353 #ifdef CONFIG_SYNTH_EVENTS
5354 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5355 "\t Write into this file to define/undefine new synthetic events.\n"
5356 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5362 tracing_readme_read(struct file *filp, char __user *ubuf,
5363 size_t cnt, loff_t *ppos)
5365 return simple_read_from_buffer(ubuf, cnt, ppos,
5366 readme_msg, strlen(readme_msg));
5369 static const struct file_operations tracing_readme_fops = {
5370 .open = tracing_open_generic,
5371 .read = tracing_readme_read,
5372 .llseek = generic_file_llseek,
5375 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5379 if (*pos || m->count)
5384 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5385 if (trace_find_tgid(*ptr))
5392 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5402 v = saved_tgids_next(m, v, &l);
5410 static void saved_tgids_stop(struct seq_file *m, void *v)
5414 static int saved_tgids_show(struct seq_file *m, void *v)
5416 int pid = (int *)v - tgid_map;
5418 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5422 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5423 .start = saved_tgids_start,
5424 .stop = saved_tgids_stop,
5425 .next = saved_tgids_next,
5426 .show = saved_tgids_show,
5429 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5433 ret = tracing_check_open_get_tr(NULL);
5437 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5441 static const struct file_operations tracing_saved_tgids_fops = {
5442 .open = tracing_saved_tgids_open,
5444 .llseek = seq_lseek,
5445 .release = seq_release,
5448 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5450 unsigned int *ptr = v;
5452 if (*pos || m->count)
5457 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5459 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5468 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5474 arch_spin_lock(&trace_cmdline_lock);
5476 v = &savedcmd->map_cmdline_to_pid[0];
5478 v = saved_cmdlines_next(m, v, &l);
5486 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5488 arch_spin_unlock(&trace_cmdline_lock);
5492 static int saved_cmdlines_show(struct seq_file *m, void *v)
5494 char buf[TASK_COMM_LEN];
5495 unsigned int *pid = v;
5497 __trace_find_cmdline(*pid, buf);
5498 seq_printf(m, "%d %s\n", *pid, buf);
5502 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5503 .start = saved_cmdlines_start,
5504 .next = saved_cmdlines_next,
5505 .stop = saved_cmdlines_stop,
5506 .show = saved_cmdlines_show,
5509 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5513 ret = tracing_check_open_get_tr(NULL);
5517 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5520 static const struct file_operations tracing_saved_cmdlines_fops = {
5521 .open = tracing_saved_cmdlines_open,
5523 .llseek = seq_lseek,
5524 .release = seq_release,
5528 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5529 size_t cnt, loff_t *ppos)
5534 arch_spin_lock(&trace_cmdline_lock);
5535 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5536 arch_spin_unlock(&trace_cmdline_lock);
5538 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5541 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5543 kfree(s->saved_cmdlines);
5544 kfree(s->map_cmdline_to_pid);
5548 static int tracing_resize_saved_cmdlines(unsigned int val)
5550 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5552 s = kmalloc(sizeof(*s), GFP_KERNEL);
5556 if (allocate_cmdlines_buffer(val, s) < 0) {
5561 arch_spin_lock(&trace_cmdline_lock);
5562 savedcmd_temp = savedcmd;
5564 arch_spin_unlock(&trace_cmdline_lock);
5565 free_saved_cmdlines_buffer(savedcmd_temp);
5571 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5572 size_t cnt, loff_t *ppos)
5577 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5581 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
5582 if (!val || val > PID_MAX_DEFAULT)
5585 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5594 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5595 .open = tracing_open_generic,
5596 .read = tracing_saved_cmdlines_size_read,
5597 .write = tracing_saved_cmdlines_size_write,
5600 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5601 static union trace_eval_map_item *
5602 update_eval_map(union trace_eval_map_item *ptr)
5604 if (!ptr->map.eval_string) {
5605 if (ptr->tail.next) {
5606 ptr = ptr->tail.next;
5607 /* Set ptr to the next real item (skip head) */
5615 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5617 union trace_eval_map_item *ptr = v;
5620 * Paranoid! If ptr points to end, we don't want to increment past it.
5621 * This really should never happen.
5624 ptr = update_eval_map(ptr);
5625 if (WARN_ON_ONCE(!ptr))
5629 ptr = update_eval_map(ptr);
5634 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5636 union trace_eval_map_item *v;
5639 mutex_lock(&trace_eval_mutex);
5641 v = trace_eval_maps;
5645 while (v && l < *pos) {
5646 v = eval_map_next(m, v, &l);
5652 static void eval_map_stop(struct seq_file *m, void *v)
5654 mutex_unlock(&trace_eval_mutex);
5657 static int eval_map_show(struct seq_file *m, void *v)
5659 union trace_eval_map_item *ptr = v;
5661 seq_printf(m, "%s %ld (%s)\n",
5662 ptr->map.eval_string, ptr->map.eval_value,
5668 static const struct seq_operations tracing_eval_map_seq_ops = {
5669 .start = eval_map_start,
5670 .next = eval_map_next,
5671 .stop = eval_map_stop,
5672 .show = eval_map_show,
5675 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5679 ret = tracing_check_open_get_tr(NULL);
5683 return seq_open(filp, &tracing_eval_map_seq_ops);
5686 static const struct file_operations tracing_eval_map_fops = {
5687 .open = tracing_eval_map_open,
5689 .llseek = seq_lseek,
5690 .release = seq_release,
5693 static inline union trace_eval_map_item *
5694 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5696 /* Return tail of array given the head */
5697 return ptr + ptr->head.length + 1;
5701 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5704 struct trace_eval_map **stop;
5705 struct trace_eval_map **map;
5706 union trace_eval_map_item *map_array;
5707 union trace_eval_map_item *ptr;
5712 * The trace_eval_maps contains the map plus a head and tail item,
5713 * where the head holds the module and length of array, and the
5714 * tail holds a pointer to the next list.
5716 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5718 pr_warn("Unable to allocate trace eval mapping\n");
5722 mutex_lock(&trace_eval_mutex);
5724 if (!trace_eval_maps)
5725 trace_eval_maps = map_array;
5727 ptr = trace_eval_maps;
5729 ptr = trace_eval_jmp_to_tail(ptr);
5730 if (!ptr->tail.next)
5732 ptr = ptr->tail.next;
5735 ptr->tail.next = map_array;
5737 map_array->head.mod = mod;
5738 map_array->head.length = len;
5741 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5742 map_array->map = **map;
5745 memset(map_array, 0, sizeof(*map_array));
5747 mutex_unlock(&trace_eval_mutex);
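/*
 * Illustrative layout sketch, inferred from the comment above (not part of
 * the original file): each module's contribution to trace_eval_maps is laid
 * out as
 *
 *	[ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * and trace_eval_jmp_to_tail() jumps from the head to the tail entry, whose
 * ->tail.next links to the next module's array.
 */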
5750 static void trace_create_eval_file(struct dentry *d_tracer)
5752 trace_create_file("eval_map", 0444, d_tracer,
5753 NULL, &tracing_eval_map_fops);
5756 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5757 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5758 static inline void trace_insert_eval_map_file(struct module *mod,
5759 struct trace_eval_map **start, int len) { }
5760 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5762 static void trace_insert_eval_map(struct module *mod,
5763 struct trace_eval_map **start, int len)
5765 struct trace_eval_map **map;
5772 trace_event_eval_update(map, len);
5774 trace_insert_eval_map_file(mod, start, len);
5778 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5779 size_t cnt, loff_t *ppos)
5781 struct trace_array *tr = filp->private_data;
5782 char buf[MAX_TRACER_SIZE+2];
5785 mutex_lock(&trace_types_lock);
5786 r = sprintf(buf, "%s\n", tr->current_trace->name);
5787 mutex_unlock(&trace_types_lock);
5789 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5792 int tracer_init(struct tracer *t, struct trace_array *tr)
5794 tracing_reset_online_cpus(&tr->array_buffer);
5798 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5802 for_each_tracing_cpu(cpu)
5803 per_cpu_ptr(buf->data, cpu)->entries = val;
5806 #ifdef CONFIG_TRACER_MAX_TRACE
5807 /* resize @trace_buf's buffer to the size of @size_buf's entries */
5808 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5809 struct array_buffer *size_buf, int cpu_id)
5813 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5814 for_each_tracing_cpu(cpu) {
5815 ret = ring_buffer_resize(trace_buf->buffer,
5816 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5819 per_cpu_ptr(trace_buf->data, cpu)->entries =
5820 per_cpu_ptr(size_buf->data, cpu)->entries;
5823 ret = ring_buffer_resize(trace_buf->buffer,
5824 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5826 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5827 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5832 #endif /* CONFIG_TRACER_MAX_TRACE */
5834 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5835 unsigned long size, int cpu)
5840 * If kernel or user changes the size of the ring buffer
5841 * we use the size that was given, and we can forget about
5842 * expanding it later.
5844 ring_buffer_expanded = true;
5846 /* May be called before buffers are initialized */
5847 if (!tr->array_buffer.buffer)
5850 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5854 #ifdef CONFIG_TRACER_MAX_TRACE
5855 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5856 !tr->current_trace->use_max_tr)
5859 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5861 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5862 &tr->array_buffer, cpu);
5865 * AARGH! We are left with different
5866 * size max buffer!!!!
5867 * The max buffer is our "snapshot" buffer.
5868 * When a tracer needs a snapshot (one of the
5869 * latency tracers), it swaps the max buffer
5870 * with the saved snapshot. We succeeded in updating
5871 * the size of the main buffer, but failed to
5872 * update the size of the max buffer. But when we tried
5873 * to reset the main buffer to the original size, we
5874 * failed there too. This is very unlikely to
5875 * happen, but if it does, warn and kill all
5879 tracing_disabled = 1;
5884 if (cpu == RING_BUFFER_ALL_CPUS)
5885 set_buffer_entries(&tr->max_buffer, size);
5887 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5890 #endif /* CONFIG_TRACER_MAX_TRACE */
5892 if (cpu == RING_BUFFER_ALL_CPUS)
5893 set_buffer_entries(&tr->array_buffer, size);
5895 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5900 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5901 unsigned long size, int cpu_id)
5905 mutex_lock(&trace_types_lock);
5907 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5908 /* make sure, this cpu is enabled in the mask */
5909 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5915 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5920 mutex_unlock(&trace_types_lock);
5927 * tracing_update_buffers - used by tracing facility to expand ring buffers
5929 * To save memory when tracing is never used on a system that has it
5930 * configured in, the ring buffers are set to a minimum size. Once
5931 * a user starts to use the tracing facility, they need to grow
5932 * to their default size.
5934 * This function is to be called when a tracer is about to be used.
5936 int tracing_update_buffers(void)
5940 mutex_lock(&trace_types_lock);
5941 if (!ring_buffer_expanded)
5942 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5943 RING_BUFFER_ALL_CPUS);
5944 mutex_unlock(&trace_types_lock);
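/*
 * Illustrative sketch, not part of the original file: code that is about to
 * start using the tracing facility (e.g. enabling an event or a tracer)
 * first expands the buffers:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */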
5949 struct trace_option_dentry;
5952 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5955 * Used to clear out the tracer before deletion of an instance.
5956 * Must have trace_types_lock held.
5958 static void tracing_set_nop(struct trace_array *tr)
5960 if (tr->current_trace == &nop_trace)
5963 tr->current_trace->enabled--;
5965 if (tr->current_trace->reset)
5966 tr->current_trace->reset(tr);
5968 tr->current_trace = &nop_trace;
5971 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5973 /* Only enable if the directory has been created already. */
5977 create_trace_option_files(tr, t);
5980 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5983 #ifdef CONFIG_TRACER_MAX_TRACE
5988 mutex_lock(&trace_types_lock);
5990 if (!ring_buffer_expanded) {
5991 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5992 RING_BUFFER_ALL_CPUS);
5998 for (t = trace_types; t; t = t->next) {
5999 if (strcmp(t->name, buf) == 0)
6006 if (t == tr->current_trace)
6009 #ifdef CONFIG_TRACER_SNAPSHOT
6010 if (t->use_max_tr) {
6011 arch_spin_lock(&tr->max_lock);
6012 if (tr->cond_snapshot)
6014 arch_spin_unlock(&tr->max_lock);
6019 /* Some tracers won't work on kernel command line */
6020 if (system_state < SYSTEM_RUNNING && t->noboot) {
6021 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6026 /* Some tracers are only allowed for the top level buffer */
6027 if (!trace_ok_for_array(t, tr)) {
6032 /* If trace pipe files are being read, we can't change the tracer */
6033 if (tr->trace_ref) {
6038 trace_branch_disable();
6040 tr->current_trace->enabled--;
6042 if (tr->current_trace->reset)
6043 tr->current_trace->reset(tr);
6045 /* Current trace needs to be nop_trace before synchronize_rcu */
6046 tr->current_trace = &nop_trace;
6048 #ifdef CONFIG_TRACER_MAX_TRACE
6049 had_max_tr = tr->allocated_snapshot;
6051 if (had_max_tr && !t->use_max_tr) {
6053 * We need to make sure that the update_max_tr sees that
6054 * current_trace changed to nop_trace to keep it from
6055 * swapping the buffers after we resize it.
6056 * update_max_tr() is called with interrupts disabled,
6057 * so a synchronize_rcu() is sufficient.
6064 #ifdef CONFIG_TRACER_MAX_TRACE
6065 if (t->use_max_tr && !had_max_tr) {
6066 ret = tracing_alloc_snapshot_instance(tr);
6073 ret = tracer_init(t, tr);
6078 tr->current_trace = t;
6079 tr->current_trace->enabled++;
6080 trace_branch_enable(tr);
6082 mutex_unlock(&trace_types_lock);
6088 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6089 size_t cnt, loff_t *ppos)
6091 struct trace_array *tr = filp->private_data;
6092 char buf[MAX_TRACER_SIZE+1];
6099 if (cnt > MAX_TRACER_SIZE)
6100 cnt = MAX_TRACER_SIZE;
6102 if (copy_from_user(buf, ubuf, cnt))
6107 /* strip ending whitespace. */
6108 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6111 err = tracing_set_tracer(tr, buf);
6121 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6122 size_t cnt, loff_t *ppos)
6127 r = snprintf(buf, sizeof(buf), "%ld\n",
6128 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6129 if (r > sizeof(buf))
6131 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6135 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6136 size_t cnt, loff_t *ppos)
6141 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6151 tracing_thresh_read(struct file *filp, char __user *ubuf,
6152 size_t cnt, loff_t *ppos)
6154 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6158 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6159 size_t cnt, loff_t *ppos)
6161 struct trace_array *tr = filp->private_data;
6164 mutex_lock(&trace_types_lock);
6165 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6169 if (tr->current_trace->update_thresh) {
6170 ret = tr->current_trace->update_thresh(tr);
6177 mutex_unlock(&trace_types_lock);
6182 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6185 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6186 size_t cnt, loff_t *ppos)
6188 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6192 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6193 size_t cnt, loff_t *ppos)
6195 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6200 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6202 struct trace_array *tr = inode->i_private;
6203 struct trace_iterator *iter;
6206 ret = tracing_check_open_get_tr(tr);
6210 mutex_lock(&trace_types_lock);
6212 /* create a buffer to store the information to pass to userspace */
6213 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6216 __trace_array_put(tr);
6220 trace_seq_init(&iter->seq);
6221 iter->trace = tr->current_trace;
6223 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6228 /* trace pipe does not show start of buffer */
6229 cpumask_setall(iter->started);
6231 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6232 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6234 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6235 if (trace_clocks[tr->clock_id].in_ns)
6236 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6239 iter->array_buffer = &tr->array_buffer;
6240 iter->cpu_file = tracing_get_cpu(inode);
6241 mutex_init(&iter->mutex);
6242 filp->private_data = iter;
6244 if (iter->trace->pipe_open)
6245 iter->trace->pipe_open(iter);
6247 nonseekable_open(inode, filp);
6251 mutex_unlock(&trace_types_lock);
6256 __trace_array_put(tr);
6257 mutex_unlock(&trace_types_lock);
6261 static int tracing_release_pipe(struct inode *inode, struct file *file)
6263 struct trace_iterator *iter = file->private_data;
6264 struct trace_array *tr = inode->i_private;
6266 mutex_lock(&trace_types_lock);
6270 if (iter->trace->pipe_close)
6271 iter->trace->pipe_close(iter);
6273 mutex_unlock(&trace_types_lock);
6275 free_cpumask_var(iter->started);
6276 mutex_destroy(&iter->mutex);
6279 trace_array_put(tr);
6285 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6287 struct trace_array *tr = iter->tr;
6289 /* Iterators are static, they should be filled or empty */
6290 if (trace_buffer_iter(iter, iter->cpu_file))
6291 return EPOLLIN | EPOLLRDNORM;
6293 if (tr->trace_flags & TRACE_ITER_BLOCK)
6295 * Always select as readable when in blocking mode
6297 return EPOLLIN | EPOLLRDNORM;
6299 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6304 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6306 struct trace_iterator *iter = filp->private_data;
6308 return trace_poll(iter, filp, poll_table);
6311 /* Must be called with iter->mutex held. */
6312 static int tracing_wait_pipe(struct file *filp)
6314 struct trace_iterator *iter = filp->private_data;
6317 while (trace_empty(iter)) {
6319 if ((filp->f_flags & O_NONBLOCK)) {
6324 * We block until we read something and tracing is disabled.
6325 * We still block if tracing is disabled, but we have never
6326 * read anything. This allows a user to cat this file, and
6327 * then enable tracing. But after we have read something,
6328 * we give an EOF when tracing is again disabled.
6330 * iter->pos will be 0 if we haven't read anything.
6332 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6335 mutex_unlock(&iter->mutex);
6337 ret = wait_on_pipe(iter, 0);
6339 mutex_lock(&iter->mutex);
6352 tracing_read_pipe(struct file *filp, char __user *ubuf,
6353 size_t cnt, loff_t *ppos)
6355 struct trace_iterator *iter = filp->private_data;
6359 * Avoid more than one consumer on a single file descriptor
6360 * This is just a matter of trace coherency; the ring buffer itself
6363 mutex_lock(&iter->mutex);
6365 /* return any leftover data */
6366 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6370 trace_seq_init(&iter->seq);
6372 if (iter->trace->read) {
6373 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6379 sret = tracing_wait_pipe(filp);
6383 /* stop when tracing is finished */
6384 if (trace_empty(iter)) {
6389 if (cnt >= PAGE_SIZE)
6390 cnt = PAGE_SIZE - 1;
6392 /* reset all but tr, trace, and overruns */
6393 memset(&iter->seq, 0,
6394 sizeof(struct trace_iterator) -
6395 offsetof(struct trace_iterator, seq));
6396 cpumask_clear(iter->started);
6397 trace_seq_init(&iter->seq);
6400 trace_event_read_lock();
6401 trace_access_lock(iter->cpu_file);
6402 while (trace_find_next_entry_inc(iter) != NULL) {
6403 enum print_line_t ret;
6404 int save_len = iter->seq.seq.len;
6406 ret = print_trace_line(iter);
6407 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6408 /* don't print partial lines */
6409 iter->seq.seq.len = save_len;
6412 if (ret != TRACE_TYPE_NO_CONSUME)
6413 trace_consume(iter);
6415 if (trace_seq_used(&iter->seq) >= cnt)
6419 * Setting the full flag means we reached the trace_seq buffer
6420 * size and we should leave by partial output condition above.
6421 * One of the trace_seq_* functions is not used properly.
6423 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6426 trace_access_unlock(iter->cpu_file);
6427 trace_event_read_unlock();
6429 /* Now copy what we have to the user */
6430 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6431 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6432 trace_seq_init(&iter->seq);
6435 * If there was nothing to send to user, in spite of consuming trace
6436 * entries, go back to wait for more entries.
6442 mutex_unlock(&iter->mutex);
6447 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6450 __free_page(spd->pages[idx]);
6454 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6460 /* Seq buffer is page-sized, exactly what we need. */
6462 save_len = iter->seq.seq.len;
6463 ret = print_trace_line(iter);
6465 if (trace_seq_has_overflowed(&iter->seq)) {
6466 iter->seq.seq.len = save_len;
6471 * This should not be hit, because it should only
6472 * be set if the iter->seq overflowed. But check it
6473 * anyway to be safe.
6475 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6476 iter->seq.seq.len = save_len;
6480 count = trace_seq_used(&iter->seq) - save_len;
6483 iter->seq.seq.len = save_len;
6487 if (ret != TRACE_TYPE_NO_CONSUME)
6488 trace_consume(iter);
6490 if (!trace_find_next_entry_inc(iter)) {
6500 static ssize_t tracing_splice_read_pipe(struct file *filp,
6502 struct pipe_inode_info *pipe,
6506 struct page *pages_def[PIPE_DEF_BUFFERS];
6507 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6508 struct trace_iterator *iter = filp->private_data;
6509 struct splice_pipe_desc spd = {
6511 .partial = partial_def,
6512 .nr_pages = 0, /* This gets updated below. */
6513 .nr_pages_max = PIPE_DEF_BUFFERS,
6514 .ops = &default_pipe_buf_ops,
6515 .spd_release = tracing_spd_release_pipe,
6521 if (splice_grow_spd(pipe, &spd))
6524 mutex_lock(&iter->mutex);
6526 if (iter->trace->splice_read) {
6527 ret = iter->trace->splice_read(iter, filp,
6528 ppos, pipe, len, flags);
6533 ret = tracing_wait_pipe(filp);
6537 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6542 trace_event_read_lock();
6543 trace_access_lock(iter->cpu_file);
6545 /* Fill as many pages as possible. */
6546 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6547 spd.pages[i] = alloc_page(GFP_KERNEL);
6551 rem = tracing_fill_pipe_page(rem, iter);
6553 /* Copy the data into the page, so we can start over. */
6554 ret = trace_seq_to_buffer(&iter->seq,
6555 page_address(spd.pages[i]),
6556 trace_seq_used(&iter->seq));
6558 __free_page(spd.pages[i]);
6561 spd.partial[i].offset = 0;
6562 spd.partial[i].len = trace_seq_used(&iter->seq);
6564 trace_seq_init(&iter->seq);
6567 trace_access_unlock(iter->cpu_file);
6568 trace_event_read_unlock();
6569 mutex_unlock(&iter->mutex);
6574 ret = splice_to_pipe(pipe, &spd);
6578 splice_shrink_spd(&spd);
6582 mutex_unlock(&iter->mutex);
6587 tracing_entries_read(struct file *filp, char __user *ubuf,
6588 size_t cnt, loff_t *ppos)
6590 struct inode *inode = file_inode(filp);
6591 struct trace_array *tr = inode->i_private;
6592 int cpu = tracing_get_cpu(inode);
6597 mutex_lock(&trace_types_lock);
6599 if (cpu == RING_BUFFER_ALL_CPUS) {
6600 int cpu, buf_size_same;
6605 /* check if all cpu sizes are same */
6606 for_each_tracing_cpu(cpu) {
6607 /* fill in the size from first enabled cpu */
6609 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6610 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6616 if (buf_size_same) {
6617 if (!ring_buffer_expanded)
6618 r = sprintf(buf, "%lu (expanded: %lu)\n",
6620 trace_buf_size >> 10);
6622 r = sprintf(buf, "%lu\n", size >> 10);
6624 r = sprintf(buf, "X\n");
6626 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6628 mutex_unlock(&trace_types_lock);
6630 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6635 tracing_entries_write(struct file *filp, const char __user *ubuf,
6636 size_t cnt, loff_t *ppos)
6638 struct inode *inode = file_inode(filp);
6639 struct trace_array *tr = inode->i_private;
6643 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6647 /* must have at least 1 entry */
6651 /* value is in KB */
6653 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6663 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6664 size_t cnt, loff_t *ppos)
6666 struct trace_array *tr = filp->private_data;
6669 unsigned long size = 0, expanded_size = 0;
6671 mutex_lock(&trace_types_lock);
6672 for_each_tracing_cpu(cpu) {
6673 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6674 if (!ring_buffer_expanded)
6675 expanded_size += trace_buf_size >> 10;
6677 if (ring_buffer_expanded)
6678 r = sprintf(buf, "%lu\n", size);
6680 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6681 mutex_unlock(&trace_types_lock);
6683 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6687 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6688 size_t cnt, loff_t *ppos)
6691 * There is no need to read what the user has written, this function
6692 * is just to make sure that there is no error when "echo" is used
6701 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6703 struct trace_array *tr = inode->i_private;
6705 /* disable tracing ? */
6706 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6707 tracer_tracing_off(tr);
6708 /* resize the ring buffer to 0 */
6709 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6711 trace_array_put(tr);
6717 tracing_mark_write(struct file *filp, const char __user *ubuf,
6718 size_t cnt, loff_t *fpos)
6720 struct trace_array *tr = filp->private_data;
6721 struct ring_buffer_event *event;
6722 enum event_trigger_type tt = ETT_NONE;
6723 struct trace_buffer *buffer;
6724 struct print_entry *entry;
6729 /* Used in tracing_mark_raw_write() as well */
6730 #define FAULTED_STR "<faulted>"
6731 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6733 if (tracing_disabled)
6736 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6739 if (cnt > TRACE_BUF_SIZE)
6740 cnt = TRACE_BUF_SIZE;
6742 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6744 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6746 /* If less than "<faulted>", then make sure we can still add that */
6747 if (cnt < FAULTED_SIZE)
6748 size += FAULTED_SIZE - cnt;
6750 buffer = tr->array_buffer.buffer;
6751 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6753 if (unlikely(!event))
6754 /* Ring buffer disabled, return as if not open for write */
6757 entry = ring_buffer_event_data(event);
6758 entry->ip = _THIS_IP_;
6760 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6762 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6768 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6769 /* do not add \n before testing triggers, but add \0 */
6770 entry->buf[cnt] = '\0';
6771 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6774 if (entry->buf[cnt - 1] != '\n') {
6775 entry->buf[cnt] = '\n';
6776 entry->buf[cnt + 1] = '\0';
6778 entry->buf[cnt] = '\0';
6780 if (static_branch_unlikely(&trace_marker_exports_enabled))
6781 ftrace_exports(event, TRACE_EXPORT_MARKER);
6782 __buffer_unlock_commit(buffer, event);
6785 event_triggers_post_call(tr->trace_marker_file, tt);
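/*
 * Illustrative userspace sketch, not part of the original file: the handler
 * above services plain write()s to the trace_marker file, e.g.:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, "hello from userspace", 20);
 *
 * The string shows up in the trace as a print entry; "<faulted>" is
 * substituted if the user page cannot be copied atomically.
 */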
6793 /* Limit it for now to 3K (including tag) */
6794 #define RAW_DATA_MAX_SIZE (1024*3)
6797 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6798 size_t cnt, loff_t *fpos)
6800 struct trace_array *tr = filp->private_data;
6801 struct ring_buffer_event *event;
6802 struct trace_buffer *buffer;
6803 struct raw_data_entry *entry;
6808 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6810 if (tracing_disabled)
6813 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6816 /* The marker must at least have a tag id */
6817 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6820 if (cnt > TRACE_BUF_SIZE)
6821 cnt = TRACE_BUF_SIZE;
6823 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6825 size = sizeof(*entry) + cnt;
6826 if (cnt < FAULT_SIZE_ID)
6827 size += FAULT_SIZE_ID - cnt;
6829 buffer = tr->array_buffer.buffer;
6830 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6833 /* Ring buffer disabled, return as if not open for write */
6836 entry = ring_buffer_event_data(event);
6838 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6841 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6846 __buffer_unlock_commit(buffer, event);
6854 static int tracing_clock_show(struct seq_file *m, void *v)
6856 struct trace_array *tr = m->private;
6859 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6861 "%s%s%s%s", i ? " " : "",
6862 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6863 i == tr->clock_id ? "]" : "");
6869 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6873 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6874 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6877 if (i == ARRAY_SIZE(trace_clocks))
6880 mutex_lock(&trace_types_lock);
6884 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6887 * New clock may not be consistent with the previous clock.
6888 * Reset the buffer so that it doesn't have incomparable timestamps.
6890 tracing_reset_online_cpus(&tr->array_buffer);
6892 #ifdef CONFIG_TRACER_MAX_TRACE
6893 if (tr->max_buffer.buffer)
6894 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6895 tracing_reset_online_cpus(&tr->max_buffer);
6898 mutex_unlock(&trace_types_lock);
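/*
 * Illustrative sketch, not part of the original file: callers pick a clock
 * by name, using the same strings shown in the trace_clock file (e.g.
 * "local", "global", "counter"):
 *
 *	if (tracing_set_clock(&global_trace, "global") < 0)
 *		pr_warn("unknown trace clock\n");
 */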
6903 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6904 size_t cnt, loff_t *fpos)
6906 struct seq_file *m = filp->private_data;
6907 struct trace_array *tr = m->private;
6909 const char *clockstr;
6912 if (cnt >= sizeof(buf))
6915 if (copy_from_user(buf, ubuf, cnt))
6920 clockstr = strstrip(buf);
6922 ret = tracing_set_clock(tr, clockstr);
6931 static int tracing_clock_open(struct inode *inode, struct file *file)
6933 struct trace_array *tr = inode->i_private;
6936 ret = tracing_check_open_get_tr(tr);
6940 ret = single_open(file, tracing_clock_show, inode->i_private);
6942 trace_array_put(tr);
6947 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6949 struct trace_array *tr = m->private;
6951 mutex_lock(&trace_types_lock);
6953 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6954 seq_puts(m, "delta [absolute]\n");
6956 seq_puts(m, "[delta] absolute\n");
6958 mutex_unlock(&trace_types_lock);
6963 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6965 struct trace_array *tr = inode->i_private;
6968 ret = tracing_check_open_get_tr(tr);
6972 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6974 trace_array_put(tr);
6979 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6983 mutex_lock(&trace_types_lock);
6985 if (abs && tr->time_stamp_abs_ref++)
6989 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6994 if (--tr->time_stamp_abs_ref)
6998 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
7000 #ifdef CONFIG_TRACER_MAX_TRACE
7001 if (tr->max_buffer.buffer)
7002 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
7005 mutex_unlock(&trace_types_lock);
7010 struct ftrace_buffer_info {
7011 struct trace_iterator iter;
7013 unsigned int spare_cpu;
7017 #ifdef CONFIG_TRACER_SNAPSHOT
7018 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7020 struct trace_array *tr = inode->i_private;
7021 struct trace_iterator *iter;
7025 ret = tracing_check_open_get_tr(tr);
7029 if (file->f_mode & FMODE_READ) {
7030 iter = __tracing_open(inode, file, true);
7032 ret = PTR_ERR(iter);
7034 /* Writes still need the seq_file to hold the private data */
7036 m = kzalloc(sizeof(*m), GFP_KERNEL);
7039 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7047 iter->array_buffer = &tr->max_buffer;
7048 iter->cpu_file = tracing_get_cpu(inode);
7050 file->private_data = m;
7054 trace_array_put(tr);
7060 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7063 struct seq_file *m = filp->private_data;
7064 struct trace_iterator *iter = m->private;
7065 struct trace_array *tr = iter->tr;
7069 ret = tracing_update_buffers();
7073 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7077 mutex_lock(&trace_types_lock);
7079 if (tr->current_trace->use_max_tr) {
7084 arch_spin_lock(&tr->max_lock);
7085 if (tr->cond_snapshot)
7087 arch_spin_unlock(&tr->max_lock);
7093 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7097 if (tr->allocated_snapshot)
7101 /* Only allow per-cpu swap if the ring buffer supports it */
7102 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7103 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7108 if (tr->allocated_snapshot)
7109 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7110 &tr->array_buffer, iter->cpu_file);
7112 ret = tracing_alloc_snapshot_instance(tr);
7115 local_irq_disable();
7116 /* Now, we're going to swap */
7117 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7118 update_max_tr(tr, current, smp_processor_id(), NULL);
7120 update_max_tr_single(tr, current, iter->cpu_file);
7124 if (tr->allocated_snapshot) {
7125 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7126 tracing_reset_online_cpus(&tr->max_buffer);
7128 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7138 mutex_unlock(&trace_types_lock);
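/*
 * Illustrative usage, not part of the original file: from userspace the
 * handler above is normally driven with simple writes, e.g.:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot	  (allocate and take a snapshot)
 *	# echo 0 > /sys/kernel/tracing/snapshot	  (free the snapshot buffer)
 *
 * The mapping from the written value to the action taken is handled in the
 * function above.
 */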
7142 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7144 struct seq_file *m = file->private_data;
7147 ret = tracing_release(inode, file);
7149 if (file->f_mode & FMODE_READ)
7152 /* If write only, the seq_file is just a stub */
7160 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7161 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7162 size_t count, loff_t *ppos);
7163 static int tracing_buffers_release(struct inode *inode, struct file *file);
7164 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7165 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7167 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7169 struct ftrace_buffer_info *info;
7172 /* The following checks for tracefs lockdown */
7173 ret = tracing_buffers_open(inode, filp);
7177 info = filp->private_data;
7179 if (info->iter.trace->use_max_tr) {
7180 tracing_buffers_release(inode, filp);
7184 info->iter.snapshot = true;
7185 info->iter.array_buffer = &info->iter.tr->max_buffer;
7190 #endif /* CONFIG_TRACER_SNAPSHOT */
7193 static const struct file_operations tracing_thresh_fops = {
7194 .open = tracing_open_generic,
7195 .read = tracing_thresh_read,
7196 .write = tracing_thresh_write,
7197 .llseek = generic_file_llseek,
7200 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7201 static const struct file_operations tracing_max_lat_fops = {
7202 .open = tracing_open_generic,
7203 .read = tracing_max_lat_read,
7204 .write = tracing_max_lat_write,
7205 .llseek = generic_file_llseek,
7209 static const struct file_operations set_tracer_fops = {
7210 .open = tracing_open_generic,
7211 .read = tracing_set_trace_read,
7212 .write = tracing_set_trace_write,
7213 .llseek = generic_file_llseek,
7216 static const struct file_operations tracing_pipe_fops = {
7217 .open = tracing_open_pipe,
7218 .poll = tracing_poll_pipe,
7219 .read = tracing_read_pipe,
7220 .splice_read = tracing_splice_read_pipe,
7221 .release = tracing_release_pipe,
7222 .llseek = no_llseek,
7225 static const struct file_operations tracing_entries_fops = {
7226 .open = tracing_open_generic_tr,
7227 .read = tracing_entries_read,
7228 .write = tracing_entries_write,
7229 .llseek = generic_file_llseek,
7230 .release = tracing_release_generic_tr,
7233 static const struct file_operations tracing_total_entries_fops = {
7234 .open = tracing_open_generic_tr,
7235 .read = tracing_total_entries_read,
7236 .llseek = generic_file_llseek,
7237 .release = tracing_release_generic_tr,
7240 static const struct file_operations tracing_free_buffer_fops = {
7241 .open = tracing_open_generic_tr,
7242 .write = tracing_free_buffer_write,
7243 .release = tracing_free_buffer_release,
7246 static const struct file_operations tracing_mark_fops = {
7247 .open = tracing_open_generic_tr,
7248 .write = tracing_mark_write,
7249 .llseek = generic_file_llseek,
7250 .release = tracing_release_generic_tr,
7253 static const struct file_operations tracing_mark_raw_fops = {
7254 .open = tracing_open_generic_tr,
7255 .write = tracing_mark_raw_write,
7256 .llseek = generic_file_llseek,
7257 .release = tracing_release_generic_tr,
7260 static const struct file_operations trace_clock_fops = {
7261 .open = tracing_clock_open,
7263 .llseek = seq_lseek,
7264 .release = tracing_single_release_tr,
7265 .write = tracing_clock_write,
7268 static const struct file_operations trace_time_stamp_mode_fops = {
7269 .open = tracing_time_stamp_mode_open,
7271 .llseek = seq_lseek,
7272 .release = tracing_single_release_tr,
7275 #ifdef CONFIG_TRACER_SNAPSHOT
7276 static const struct file_operations snapshot_fops = {
7277 .open = tracing_snapshot_open,
7279 .write = tracing_snapshot_write,
7280 .llseek = tracing_lseek,
7281 .release = tracing_snapshot_release,
7284 static const struct file_operations snapshot_raw_fops = {
7285 .open = snapshot_raw_open,
7286 .read = tracing_buffers_read,
7287 .release = tracing_buffers_release,
7288 .splice_read = tracing_buffers_splice_read,
7289 .llseek = no_llseek,
7292 #endif /* CONFIG_TRACER_SNAPSHOT */
7294 #define TRACING_LOG_ERRS_MAX 8
7295 #define TRACING_LOG_LOC_MAX 128
7297 #define CMD_PREFIX " Command: "
7300 const char **errs; /* ptr to loc-specific array of err strings */
7301 u8 type; /* index into errs -> specific err string */
7302 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7306 struct tracing_log_err {
7307 struct list_head list;
7308 struct err_info info;
7309 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7310 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7313 static DEFINE_MUTEX(tracing_err_log_lock);
7315 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7317 struct tracing_log_err *err;
7319 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7320 err = kzalloc(sizeof(*err), GFP_KERNEL);
7322 err = ERR_PTR(-ENOMEM);
7323 tr->n_err_log_entries++;
7328 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7329 list_del(&err->list);
7335 * err_pos - find the position of a string within a command for error careting
7336 * @cmd: The tracing command that caused the error
7337 * @str: The string to position the caret at within @cmd
7339  * Finds the position of the first occurrence of @str within @cmd. The
7340 * return value can be passed to tracing_log_err() for caret placement
7343  * Returns the index within @cmd of the first occurrence of @str or 0
7344 * if @str was not found.
7346 unsigned int err_pos(char *cmd, const char *str)
7350 if (WARN_ON(!strlen(cmd)))
7353 found = strstr(cmd, str);
7361 * tracing_log_err - write an error to the tracing error log
7362 * @tr: The associated trace array for the error (NULL for top level array)
7363 * @loc: A string describing where the error occurred
7364 * @cmd: The tracing command that caused the error
7365 * @errs: The array of loc-specific static error strings
7366 * @type: The index into errs[], which produces the specific static err string
7367 * @pos: The position the caret should be placed in the cmd
7369 * Writes an error into tracing/error_log of the form:
7371 * <loc>: error: <text>
7375 * tracing/error_log is a small log file containing the last
7376 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7377 * unless there has been a tracing error, and the error log can be
7378 * cleared and have its memory freed by writing the empty string in
7379 * truncation mode to it i.e. echo > tracing/error_log.
7381 * NOTE: the @errs array along with the @type param are used to
7382 * produce a static error string - this string is not copied and saved
7383 * when the error is logged - only a pointer to it is saved. See
7384 * existing callers for examples of how static strings are typically
7385 * defined for use with tracing_log_err().
7387 void tracing_log_err(struct trace_array *tr,
7388 const char *loc, const char *cmd,
7389 const char **errs, u8 type, u8 pos)
7391 struct tracing_log_err *err;
7396 mutex_lock(&tracing_err_log_lock);
7397 err = get_tracing_log_err(tr);
7398 if (PTR_ERR(err) == -ENOMEM) {
7399 mutex_unlock(&tracing_err_log_lock);
7403 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7404 	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7406 err->info.errs = errs;
7407 err->info.type = type;
7408 err->info.pos = pos;
7409 err->info.ts = local_clock();
7411 list_add_tail(&err->list, &tr->err_log);
7412 mutex_unlock(&tracing_err_log_lock);
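/*
 * Illustrative sketch (not part of the original file): how a command
 * parser might report a failure through err_pos() and tracing_log_err().
 * The error table, enum and my_field_exists() helper below are
 * hypothetical; see the hist trigger and synthetic event code for real
 * callers.
 *
 *	static const char *my_cmd_errs[] = {
 *		"Field not found",
 *		"Duplicate field name",
 *	};
 *	enum { MY_ERR_FIELD_NOT_FOUND, MY_ERR_DUPLICATE_FIELD };
 *
 *	static int my_parse(struct trace_array *tr, char *cmd, char *field)
 *	{
 *		if (!my_field_exists(field)) {
 *			tracing_log_err(tr, "my_cmd", cmd, my_cmd_errs,
 *					MY_ERR_FIELD_NOT_FOUND,
 *					err_pos(cmd, field));
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */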
7415 static void clear_tracing_err_log(struct trace_array *tr)
7417 struct tracing_log_err *err, *next;
7419 mutex_lock(&tracing_err_log_lock);
7420 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7421 list_del(&err->list);
7425 tr->n_err_log_entries = 0;
7426 mutex_unlock(&tracing_err_log_lock);
7429 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7431 struct trace_array *tr = m->private;
7433 mutex_lock(&tracing_err_log_lock);
7435 return seq_list_start(&tr->err_log, *pos);
7438 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7440 struct trace_array *tr = m->private;
7442 return seq_list_next(v, &tr->err_log, pos);
7445 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7447 mutex_unlock(&tracing_err_log_lock);
7450 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7454 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7456 for (i = 0; i < pos; i++)
7461 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7463 struct tracing_log_err *err = v;
7466 const char *err_text = err->info.errs[err->info.type];
7467 u64 sec = err->info.ts;
7470 nsec = do_div(sec, NSEC_PER_SEC);
7471 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7472 err->loc, err_text);
7473 seq_printf(m, "%s", err->cmd);
7474 tracing_err_log_show_pos(m, err->info.pos);
7480 static const struct seq_operations tracing_err_log_seq_ops = {
7481 .start = tracing_err_log_seq_start,
7482 .next = tracing_err_log_seq_next,
7483 .stop = tracing_err_log_seq_stop,
7484 .show = tracing_err_log_seq_show
7487 static int tracing_err_log_open(struct inode *inode, struct file *file)
7489 struct trace_array *tr = inode->i_private;
7492 ret = tracing_check_open_get_tr(tr);
7496 /* If this file was opened for write, then erase contents */
7497 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7498 clear_tracing_err_log(tr);
7500 if (file->f_mode & FMODE_READ) {
7501 ret = seq_open(file, &tracing_err_log_seq_ops);
7503 struct seq_file *m = file->private_data;
7506 trace_array_put(tr);
7512 static ssize_t tracing_err_log_write(struct file *file,
7513 const char __user *buffer,
7514 size_t count, loff_t *ppos)
7519 static int tracing_err_log_release(struct inode *inode, struct file *file)
7521 struct trace_array *tr = inode->i_private;
7523 trace_array_put(tr);
7525 if (file->f_mode & FMODE_READ)
7526 seq_release(inode, file);
7531 static const struct file_operations tracing_err_log_fops = {
7532 .open = tracing_err_log_open,
7533 .write = tracing_err_log_write,
7535 .llseek = seq_lseek,
7536 .release = tracing_err_log_release,
7539 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7541 struct trace_array *tr = inode->i_private;
7542 struct ftrace_buffer_info *info;
7545 ret = tracing_check_open_get_tr(tr);
7549 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7551 trace_array_put(tr);
7555 mutex_lock(&trace_types_lock);
7558 info->iter.cpu_file = tracing_get_cpu(inode);
7559 info->iter.trace = tr->current_trace;
7560 info->iter.array_buffer = &tr->array_buffer;
7562 /* Force reading ring buffer for first read */
7563 info->read = (unsigned int)-1;
7565 filp->private_data = info;
7569 mutex_unlock(&trace_types_lock);
7571 ret = nonseekable_open(inode, filp);
7573 trace_array_put(tr);
7579 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7581 struct ftrace_buffer_info *info = filp->private_data;
7582 struct trace_iterator *iter = &info->iter;
7584 return trace_poll(iter, filp, poll_table);
7588 tracing_buffers_read(struct file *filp, char __user *ubuf,
7589 size_t count, loff_t *ppos)
7591 struct ftrace_buffer_info *info = filp->private_data;
7592 struct trace_iterator *iter = &info->iter;
7599 #ifdef CONFIG_TRACER_MAX_TRACE
7600 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7605 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7607 if (IS_ERR(info->spare)) {
7608 ret = PTR_ERR(info->spare);
7611 info->spare_cpu = iter->cpu_file;
7617 /* Do we have previous read data to read? */
7618 if (info->read < PAGE_SIZE)
7622 trace_access_lock(iter->cpu_file);
7623 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7627 trace_access_unlock(iter->cpu_file);
7630 if (trace_empty(iter)) {
7631 if ((filp->f_flags & O_NONBLOCK))
7634 ret = wait_on_pipe(iter, 0);
7645 size = PAGE_SIZE - info->read;
7649 ret = copy_to_user(ubuf, info->spare + info->read, size);
7661 static int tracing_buffers_release(struct inode *inode, struct file *file)
7663 struct ftrace_buffer_info *info = file->private_data;
7664 struct trace_iterator *iter = &info->iter;
7666 mutex_lock(&trace_types_lock);
7668 iter->tr->trace_ref--;
7670 __trace_array_put(iter->tr);
7673 ring_buffer_free_read_page(iter->array_buffer->buffer,
7674 info->spare_cpu, info->spare);
7677 mutex_unlock(&trace_types_lock);
7683 struct trace_buffer *buffer;
7686 refcount_t refcount;
7689 static void buffer_ref_release(struct buffer_ref *ref)
7691 if (!refcount_dec_and_test(&ref->refcount))
7693 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7697 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7698 struct pipe_buffer *buf)
7700 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7702 buffer_ref_release(ref);
7706 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7707 struct pipe_buffer *buf)
7709 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7711 if (refcount_read(&ref->refcount) > INT_MAX/2)
7714 refcount_inc(&ref->refcount);
7718 /* Pipe buffer operations for a buffer. */
7719 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7720 .release = buffer_pipe_buf_release,
7721 .get = buffer_pipe_buf_get,
7725 * Callback from splice_to_pipe(), if we need to release some pages
7726  * at the end of the spd in case we errored out while filling the pipe.
7728 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7730 struct buffer_ref *ref =
7731 (struct buffer_ref *)spd->partial[i].private;
7733 buffer_ref_release(ref);
7734 spd->partial[i].private = 0;
7738 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7739 struct pipe_inode_info *pipe, size_t len,
7742 struct ftrace_buffer_info *info = file->private_data;
7743 struct trace_iterator *iter = &info->iter;
7744 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7745 struct page *pages_def[PIPE_DEF_BUFFERS];
7746 struct splice_pipe_desc spd = {
7748 .partial = partial_def,
7749 .nr_pages_max = PIPE_DEF_BUFFERS,
7750 .ops = &buffer_pipe_buf_ops,
7751 .spd_release = buffer_spd_release,
7753 struct buffer_ref *ref;
7757 #ifdef CONFIG_TRACER_MAX_TRACE
7758 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7762 if (*ppos & (PAGE_SIZE - 1))
7765 if (len & (PAGE_SIZE - 1)) {
7766 if (len < PAGE_SIZE)
7771 if (splice_grow_spd(pipe, &spd))
7775 trace_access_lock(iter->cpu_file);
7776 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7778 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7782 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7788 refcount_set(&ref->refcount, 1);
7789 ref->buffer = iter->array_buffer->buffer;
7790 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7791 if (IS_ERR(ref->page)) {
7792 ret = PTR_ERR(ref->page);
7797 ref->cpu = iter->cpu_file;
7799 r = ring_buffer_read_page(ref->buffer, &ref->page,
7800 len, iter->cpu_file, 1);
7802 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7808 page = virt_to_page(ref->page);
7810 spd.pages[i] = page;
7811 spd.partial[i].len = PAGE_SIZE;
7812 spd.partial[i].offset = 0;
7813 spd.partial[i].private = (unsigned long)ref;
7817 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7820 trace_access_unlock(iter->cpu_file);
7823 /* did we read anything? */
7824 if (!spd.nr_pages) {
7829 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7832 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7839 ret = splice_to_pipe(pipe, &spd);
7841 splice_shrink_spd(&spd);
7846 static const struct file_operations tracing_buffers_fops = {
7847 .open = tracing_buffers_open,
7848 .read = tracing_buffers_read,
7849 .poll = tracing_buffers_poll,
7850 .release = tracing_buffers_release,
7851 .splice_read = tracing_buffers_splice_read,
7852 .llseek = no_llseek,
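/*
 * Illustrative user-space sketch (not part of this file): draining one
 * per-cpu trace_pipe_raw file with splice(2), which exercises the
 * zero-copy path implemented by tracing_buffers_splice_read() above.
 * The length passed to splice() must be a multiple of the page size
 * (4096 is assumed here); headers and error handling are omitted.
 *
 *	int fds[2], raw, out;
 *
 *	pipe(fds);
 *	raw = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		   O_RDONLY);
 *	out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *
 *	for (;;) {
 *		ssize_t n = splice(raw, NULL, fds[1], NULL, 4096,
 *				   SPLICE_F_MOVE);
 *		if (n <= 0)
 *			break;
 *		splice(fds[0], NULL, out, NULL, n, SPLICE_F_MOVE);
 *	}
 */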
7856 tracing_stats_read(struct file *filp, char __user *ubuf,
7857 size_t count, loff_t *ppos)
7859 struct inode *inode = file_inode(filp);
7860 struct trace_array *tr = inode->i_private;
7861 struct array_buffer *trace_buf = &tr->array_buffer;
7862 int cpu = tracing_get_cpu(inode);
7863 struct trace_seq *s;
7865 unsigned long long t;
7866 unsigned long usec_rem;
7868 s = kmalloc(sizeof(*s), GFP_KERNEL);
7874 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7875 trace_seq_printf(s, "entries: %ld\n", cnt);
7877 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7878 trace_seq_printf(s, "overrun: %ld\n", cnt);
7880 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7881 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7883 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7884 trace_seq_printf(s, "bytes: %ld\n", cnt);
7886 if (trace_clocks[tr->clock_id].in_ns) {
7887 /* local or global for trace_clock */
7888 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7889 usec_rem = do_div(t, USEC_PER_SEC);
7890 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7893 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7894 usec_rem = do_div(t, USEC_PER_SEC);
7895 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7897 /* counter or tsc mode for trace_clock */
7898 trace_seq_printf(s, "oldest event ts: %llu\n",
7899 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7901 trace_seq_printf(s, "now ts: %llu\n",
7902 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7905 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7906 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7908 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7909 trace_seq_printf(s, "read events: %ld\n", cnt);
7911 count = simple_read_from_buffer(ubuf, count, ppos,
7912 s->buffer, trace_seq_used(s));
7919 static const struct file_operations tracing_stats_fops = {
7920 .open = tracing_open_generic_tr,
7921 .read = tracing_stats_read,
7922 .llseek = generic_file_llseek,
7923 .release = tracing_release_generic_tr,
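/*
 * Worked example (assumed input value) of the timestamp formatting used
 * above: a raw timestamp of 1234567000 ns is first converted to
 * microseconds and then split into whole seconds and a microsecond
 * remainder with do_div(), which divides in place and returns the
 * remainder.
 *
 *	unsigned long long t = 1234567000ULL;
 *	unsigned long usec_rem;
 *
 *	t = ns2usecs(t);			t is now 1234567 (us)
 *	usec_rem = do_div(t, USEC_PER_SEC);	t is now 1, usec_rem 234567
 *
 * which trace_seq_printf() renders as "    1.234567".
 */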
7926 #ifdef CONFIG_DYNAMIC_FTRACE
7929 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7930 size_t cnt, loff_t *ppos)
7936 /* 256 should be plenty to hold the amount needed */
7937 buf = kmalloc(256, GFP_KERNEL);
7941 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7942 ftrace_update_tot_cnt,
7943 ftrace_number_of_pages,
7944 ftrace_number_of_groups);
7946 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7951 static const struct file_operations tracing_dyn_info_fops = {
7952 .open = tracing_open_generic,
7953 .read = tracing_read_dyn_info,
7954 .llseek = generic_file_llseek,
7956 #endif /* CONFIG_DYNAMIC_FTRACE */
7958 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7960 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7961 struct trace_array *tr, struct ftrace_probe_ops *ops,
7964 tracing_snapshot_instance(tr);
7968 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7969 struct trace_array *tr, struct ftrace_probe_ops *ops,
7972 struct ftrace_func_mapper *mapper = data;
7976 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7986 tracing_snapshot_instance(tr);
7990 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7991 struct ftrace_probe_ops *ops, void *data)
7993 struct ftrace_func_mapper *mapper = data;
7996 seq_printf(m, "%ps:", (void *)ip);
7998 seq_puts(m, "snapshot");
8001 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8004 seq_printf(m, ":count=%ld\n", *count);
8006 seq_puts(m, ":unlimited\n");
8012 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8013 unsigned long ip, void *init_data, void **data)
8015 struct ftrace_func_mapper *mapper = *data;
8018 mapper = allocate_ftrace_func_mapper();
8024 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8028 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8029 unsigned long ip, void *data)
8031 struct ftrace_func_mapper *mapper = data;
8036 free_ftrace_func_mapper(mapper, NULL);
8040 ftrace_func_mapper_remove_ip(mapper, ip);
8043 static struct ftrace_probe_ops snapshot_probe_ops = {
8044 .func = ftrace_snapshot,
8045 .print = ftrace_snapshot_print,
8048 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8049 .func = ftrace_count_snapshot,
8050 .print = ftrace_snapshot_print,
8051 .init = ftrace_snapshot_init,
8052 .free = ftrace_snapshot_free,
8056 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8057 char *glob, char *cmd, char *param, int enable)
8059 struct ftrace_probe_ops *ops;
8060 void *count = (void *)-1;
8067 /* hash funcs only work with set_ftrace_filter */
8071 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8074 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8079 number = strsep(¶m, ":");
8081 if (!strlen(number))
8085 * We use the callback data field (which is a pointer)
8088 ret = kstrtoul(number, 0, (unsigned long *)&count);
8093 ret = tracing_alloc_snapshot_instance(tr);
8097 ret = register_ftrace_function_probe(glob, tr, ops, count);
8100 return ret < 0 ? ret : 0;
8103 static struct ftrace_func_command ftrace_snapshot_cmd = {
8105 .func = ftrace_trace_snapshot_callback,
8108 static __init int register_snapshot_cmd(void)
8110 return register_ftrace_command(&ftrace_snapshot_cmd);
8113 static inline __init int register_snapshot_cmd(void) { return 0; }
8114 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8116 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8118 if (WARN_ON(!tr->dir))
8119 return ERR_PTR(-ENODEV);
8121 /* Top directory uses NULL as the parent */
8122 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8125 /* All sub buffers have a descriptor */
8129 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8131 struct dentry *d_tracer;
8134 return tr->percpu_dir;
8136 d_tracer = tracing_get_dentry(tr);
8137 if (IS_ERR(d_tracer))
8140 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8142 MEM_FAIL(!tr->percpu_dir,
8143 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8145 return tr->percpu_dir;
8148 static struct dentry *
8149 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8150 void *data, long cpu, const struct file_operations *fops)
8152 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8154 if (ret) /* See tracing_get_cpu() */
8155 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8160 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8162 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8163 struct dentry *d_cpu;
8164 char cpu_dir[30]; /* 30 characters should be more than enough */
8169 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8170 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8172 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8176 /* per cpu trace_pipe */
8177 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8178 tr, cpu, &tracing_pipe_fops);
8181 trace_create_cpu_file("trace", 0644, d_cpu,
8182 tr, cpu, &tracing_fops);
8184 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8185 tr, cpu, &tracing_buffers_fops);
8187 trace_create_cpu_file("stats", 0444, d_cpu,
8188 tr, cpu, &tracing_stats_fops);
8190 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8191 tr, cpu, &tracing_entries_fops);
8193 #ifdef CONFIG_TRACER_SNAPSHOT
8194 trace_create_cpu_file("snapshot", 0644, d_cpu,
8195 tr, cpu, &snapshot_fops);
8197 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8198 tr, cpu, &snapshot_raw_fops);
8202 #ifdef CONFIG_FTRACE_SELFTEST
8203 /* Let selftest have access to static functions in this file */
8204 #include "trace_selftest.c"
8208 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8211 struct trace_option_dentry *topt = filp->private_data;
8214 if (topt->flags->val & topt->opt->bit)
8219 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8223 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8226 struct trace_option_dentry *topt = filp->private_data;
8230 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8234 if (val != 0 && val != 1)
8237 if (!!(topt->flags->val & topt->opt->bit) != val) {
8238 mutex_lock(&trace_types_lock);
8239 ret = __set_tracer_option(topt->tr, topt->flags,
8241 mutex_unlock(&trace_types_lock);
8252 static const struct file_operations trace_options_fops = {
8253 .open = tracing_open_generic,
8254 .read = trace_options_read,
8255 .write = trace_options_write,
8256 .llseek = generic_file_llseek,
8260 * In order to pass in both the trace_array descriptor as well as the index
8261 * to the flag that the trace option file represents, the trace_array
8262 * has a character array of trace_flags_index[], which holds the index
8263 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8264 * The address of this character array is passed to the flag option file
8265 * read/write callbacks.
8267 * In order to extract both the index and the trace_array descriptor,
8268 * get_tr_index() uses the following algorithm.
8272  * As the pointer itself contains the address of the index (remember
8272  * index[1] == 1), dereferencing it gives that index: idx = *ptr.
8275  * Then, to get to the trace_array descriptor, we subtract that index
8276  * from the ptr, which lands us at the start of the index array itself.
8278 * ptr - idx == &index[0]
8280 * Then a simple container_of() from that pointer gets us to the
8281 * trace_array descriptor.
8283 static void get_tr_index(void *data, struct trace_array **ptr,
8284 unsigned int *pindex)
8286 *pindex = *(unsigned char *)data;
8288 *ptr = container_of(data - *pindex, struct trace_array,
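/*
 * Worked example (hypothetical addresses) of the recovery described
 * above: if an option file was created with
 * data == &tr->trace_flags_index[3], then
 *
 *	*pindex = *(unsigned char *)data;	 pindex is now 3
 *	data - *pindex				 == &tr->trace_flags_index[0]
 *	container_of(data - *pindex, struct trace_array, trace_flags_index)
 *						 == tr
 *
 * This only works because init_trace_flags_index() initializes
 * trace_flags_index[i] to i.
 */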
8293 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8296 void *tr_index = filp->private_data;
8297 struct trace_array *tr;
8301 get_tr_index(tr_index, &tr, &index);
8303 if (tr->trace_flags & (1 << index))
8308 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8312 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8315 void *tr_index = filp->private_data;
8316 struct trace_array *tr;
8321 get_tr_index(tr_index, &tr, &index);
8323 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8327 if (val != 0 && val != 1)
8330 mutex_lock(&event_mutex);
8331 mutex_lock(&trace_types_lock);
8332 ret = set_tracer_flag(tr, 1 << index, val);
8333 mutex_unlock(&trace_types_lock);
8334 mutex_unlock(&event_mutex);
8344 static const struct file_operations trace_options_core_fops = {
8345 .open = tracing_open_generic,
8346 .read = trace_options_core_read,
8347 .write = trace_options_core_write,
8348 .llseek = generic_file_llseek,
8351 struct dentry *trace_create_file(const char *name,
8353 struct dentry *parent,
8355 const struct file_operations *fops)
8359 ret = tracefs_create_file(name, mode, parent, data, fops);
8361 pr_warn("Could not create tracefs '%s' entry\n", name);
8367 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8369 struct dentry *d_tracer;
8374 d_tracer = tracing_get_dentry(tr);
8375 if (IS_ERR(d_tracer))
8378 tr->options = tracefs_create_dir("options", d_tracer);
8380 pr_warn("Could not create tracefs directory 'options'\n");
8388 create_trace_option_file(struct trace_array *tr,
8389 struct trace_option_dentry *topt,
8390 struct tracer_flags *flags,
8391 struct tracer_opt *opt)
8393 struct dentry *t_options;
8395 t_options = trace_options_init_dentry(tr);
8399 topt->flags = flags;
8403 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8404 &trace_options_fops);
8409 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8411 struct trace_option_dentry *topts;
8412 struct trace_options *tr_topts;
8413 struct tracer_flags *flags;
8414 struct tracer_opt *opts;
8421 flags = tracer->flags;
8423 if (!flags || !flags->opts)
8427 * If this is an instance, only create flags for tracers
8428 * the instance may have.
8430 if (!trace_ok_for_array(tracer, tr))
8433 for (i = 0; i < tr->nr_topts; i++) {
8434 /* Make sure there's no duplicate flags. */
8435 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8441 for (cnt = 0; opts[cnt].name; cnt++)
8444 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8448 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8455 tr->topts = tr_topts;
8456 tr->topts[tr->nr_topts].tracer = tracer;
8457 tr->topts[tr->nr_topts].topts = topts;
8460 for (cnt = 0; opts[cnt].name; cnt++) {
8461 create_trace_option_file(tr, &topts[cnt], flags,
8463 MEM_FAIL(topts[cnt].entry == NULL,
8464 "Failed to create trace option: %s",
8469 static struct dentry *
8470 create_trace_option_core_file(struct trace_array *tr,
8471 const char *option, long index)
8473 struct dentry *t_options;
8475 t_options = trace_options_init_dentry(tr);
8479 return trace_create_file(option, 0644, t_options,
8480 (void *)&tr->trace_flags_index[index],
8481 &trace_options_core_fops);
8484 static void create_trace_options_dir(struct trace_array *tr)
8486 struct dentry *t_options;
8487 bool top_level = tr == &global_trace;
8490 t_options = trace_options_init_dentry(tr);
8494 for (i = 0; trace_options[i]; i++) {
8496 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8497 create_trace_option_core_file(tr, trace_options[i], i);
8502 rb_simple_read(struct file *filp, char __user *ubuf,
8503 size_t cnt, loff_t *ppos)
8505 struct trace_array *tr = filp->private_data;
8509 r = tracer_tracing_is_on(tr);
8510 r = sprintf(buf, "%d\n", r);
8512 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8516 rb_simple_write(struct file *filp, const char __user *ubuf,
8517 size_t cnt, loff_t *ppos)
8519 struct trace_array *tr = filp->private_data;
8520 struct trace_buffer *buffer = tr->array_buffer.buffer;
8524 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8529 mutex_lock(&trace_types_lock);
8530 if (!!val == tracer_tracing_is_on(tr)) {
8531 val = 0; /* do nothing */
8533 tracer_tracing_on(tr);
8534 if (tr->current_trace->start)
8535 tr->current_trace->start(tr);
8537 tracer_tracing_off(tr);
8538 if (tr->current_trace->stop)
8539 tr->current_trace->stop(tr);
8541 mutex_unlock(&trace_types_lock);
8549 static const struct file_operations rb_simple_fops = {
8550 .open = tracing_open_generic_tr,
8551 .read = rb_simple_read,
8552 .write = rb_simple_write,
8553 .release = tracing_release_generic_tr,
8554 .llseek = default_llseek,
8558 buffer_percent_read(struct file *filp, char __user *ubuf,
8559 size_t cnt, loff_t *ppos)
8561 struct trace_array *tr = filp->private_data;
8565 r = tr->buffer_percent;
8566 r = sprintf(buf, "%d\n", r);
8568 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8572 buffer_percent_write(struct file *filp, const char __user *ubuf,
8573 size_t cnt, loff_t *ppos)
8575 struct trace_array *tr = filp->private_data;
8579 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8589 tr->buffer_percent = val;
8596 static const struct file_operations buffer_percent_fops = {
8597 .open = tracing_open_generic_tr,
8598 .read = buffer_percent_read,
8599 .write = buffer_percent_write,
8600 .release = tracing_release_generic_tr,
8601 .llseek = default_llseek,
8604 static struct dentry *trace_instance_dir;
8607 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8610 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8612 enum ring_buffer_flags rb_flags;
8614 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8618 buf->buffer = ring_buffer_alloc(size, rb_flags);
8622 buf->data = alloc_percpu(struct trace_array_cpu);
8624 ring_buffer_free(buf->buffer);
8629 /* Allocate the first page for all buffers */
8630 set_buffer_entries(&tr->array_buffer,
8631 ring_buffer_size(tr->array_buffer.buffer, 0));
8636 static int allocate_trace_buffers(struct trace_array *tr, int size)
8640 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8644 #ifdef CONFIG_TRACER_MAX_TRACE
8645 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8646 allocate_snapshot ? size : 1);
8647 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8648 ring_buffer_free(tr->array_buffer.buffer);
8649 tr->array_buffer.buffer = NULL;
8650 free_percpu(tr->array_buffer.data);
8651 tr->array_buffer.data = NULL;
8654 tr->allocated_snapshot = allocate_snapshot;
8657 * Only the top level trace array gets its snapshot allocated
8658 * from the kernel command line.
8660 allocate_snapshot = false;
8666 static void free_trace_buffer(struct array_buffer *buf)
8669 ring_buffer_free(buf->buffer);
8671 free_percpu(buf->data);
8676 static void free_trace_buffers(struct trace_array *tr)
8681 free_trace_buffer(&tr->array_buffer);
8683 #ifdef CONFIG_TRACER_MAX_TRACE
8684 free_trace_buffer(&tr->max_buffer);
8688 static void init_trace_flags_index(struct trace_array *tr)
8692 /* Used by the trace options files */
8693 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8694 tr->trace_flags_index[i] = i;
8697 static void __update_tracer_options(struct trace_array *tr)
8701 for (t = trace_types; t; t = t->next)
8702 add_tracer_options(tr, t);
8705 static void update_tracer_options(struct trace_array *tr)
8707 mutex_lock(&trace_types_lock);
8708 __update_tracer_options(tr);
8709 mutex_unlock(&trace_types_lock);
8712 /* Must have trace_types_lock held */
8713 struct trace_array *trace_array_find(const char *instance)
8715 struct trace_array *tr, *found = NULL;
8717 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8718 if (tr->name && strcmp(tr->name, instance) == 0) {
8727 struct trace_array *trace_array_find_get(const char *instance)
8729 struct trace_array *tr;
8731 mutex_lock(&trace_types_lock);
8732 tr = trace_array_find(instance);
8735 mutex_unlock(&trace_types_lock);
8740 static int trace_array_create_dir(struct trace_array *tr)
8744 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8748 ret = event_trace_add_tracer(tr->dir, tr);
8750 tracefs_remove(tr->dir);
8752 init_tracer_tracefs(tr, tr->dir);
8753 __update_tracer_options(tr);
8758 static struct trace_array *trace_array_create(const char *name)
8760 struct trace_array *tr;
8764 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8766 return ERR_PTR(ret);
8768 tr->name = kstrdup(name, GFP_KERNEL);
8772 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8775 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8777 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8779 raw_spin_lock_init(&tr->start_lock);
8781 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8783 tr->current_trace = &nop_trace;
8785 INIT_LIST_HEAD(&tr->systems);
8786 INIT_LIST_HEAD(&tr->events);
8787 INIT_LIST_HEAD(&tr->hist_vars);
8788 INIT_LIST_HEAD(&tr->err_log);
8790 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8793 if (ftrace_allocate_ftrace_ops(tr) < 0)
8796 ftrace_init_trace_array(tr);
8798 init_trace_flags_index(tr);
8800 if (trace_instance_dir) {
8801 ret = trace_array_create_dir(tr);
8805 __trace_early_add_events(tr);
8807 list_add(&tr->list, &ftrace_trace_arrays);
8814 ftrace_free_ftrace_ops(tr);
8815 free_trace_buffers(tr);
8816 free_cpumask_var(tr->tracing_cpumask);
8820 return ERR_PTR(ret);
8823 static int instance_mkdir(const char *name)
8825 struct trace_array *tr;
8828 mutex_lock(&event_mutex);
8829 mutex_lock(&trace_types_lock);
8832 if (trace_array_find(name))
8835 tr = trace_array_create(name);
8837 ret = PTR_ERR_OR_ZERO(tr);
8840 mutex_unlock(&trace_types_lock);
8841 mutex_unlock(&event_mutex);
8846 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8847 * @name: The name of the trace array to be looked up/created.
8849 * Returns pointer to trace array with given name.
8850 * NULL, if it cannot be created.
8852 * NOTE: This function increments the reference counter associated with the
8853 * trace array returned. This makes sure it cannot be freed while in use.
8854 * Use trace_array_put() once the trace array is no longer needed.
8855 * If the trace_array is to be freed, trace_array_destroy() needs to
8856 * be called after the trace_array_put(), or simply let user space delete
8857 * it from the tracefs instances directory. But until the
8858 * trace_array_put() is called, user space can not delete it.
8861 struct trace_array *trace_array_get_by_name(const char *name)
8863 struct trace_array *tr;
8865 mutex_lock(&event_mutex);
8866 mutex_lock(&trace_types_lock);
8868 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8869 if (tr->name && strcmp(tr->name, name) == 0)
8873 tr = trace_array_create(name);
8881 mutex_unlock(&trace_types_lock);
8882 mutex_unlock(&event_mutex);
8885 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
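/*
 * Illustrative sketch of the in-kernel instance API exported above; the
 * instance name is made up and error handling is minimal.  A module
 * creates (or looks up) an instance, uses it, drops its reference, and
 * finally destroys the instance it created:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *
 *	(use the instance, e.g. enable events on it)
 *
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */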
8887 static int __remove_instance(struct trace_array *tr)
8891 /* Reference counter for a newly created trace array = 1. */
8892 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8895 list_del(&tr->list);
8897 /* Disable all the flags that were enabled coming in */
8898 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8899 if ((1 << i) & ZEROED_TRACE_FLAGS)
8900 set_tracer_flag(tr, 1 << i, 0);
8903 tracing_set_nop(tr);
8904 clear_ftrace_function_probes(tr);
8905 event_trace_del_tracer(tr);
8906 ftrace_clear_pids(tr);
8907 ftrace_destroy_function_files(tr);
8908 tracefs_remove(tr->dir);
8909 free_trace_buffers(tr);
8911 for (i = 0; i < tr->nr_topts; i++) {
8912 kfree(tr->topts[i].topts);
8916 free_cpumask_var(tr->tracing_cpumask);
8923 int trace_array_destroy(struct trace_array *this_tr)
8925 struct trace_array *tr;
8931 mutex_lock(&event_mutex);
8932 mutex_lock(&trace_types_lock);
8936 /* Making sure trace array exists before destroying it. */
8937 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8938 if (tr == this_tr) {
8939 ret = __remove_instance(tr);
8944 mutex_unlock(&trace_types_lock);
8945 mutex_unlock(&event_mutex);
8949 EXPORT_SYMBOL_GPL(trace_array_destroy);
8951 static int instance_rmdir(const char *name)
8953 struct trace_array *tr;
8956 mutex_lock(&event_mutex);
8957 mutex_lock(&trace_types_lock);
8960 tr = trace_array_find(name);
8962 ret = __remove_instance(tr);
8964 mutex_unlock(&trace_types_lock);
8965 mutex_unlock(&event_mutex);
8970 static __init void create_trace_instances(struct dentry *d_tracer)
8972 struct trace_array *tr;
8974 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8977 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8980 mutex_lock(&event_mutex);
8981 mutex_lock(&trace_types_lock);
8983 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8986 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8987 "Failed to create instance directory\n"))
8991 mutex_unlock(&trace_types_lock);
8992 mutex_unlock(&event_mutex);
8996 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8998 struct trace_event_file *file;
9001 trace_create_file("available_tracers", 0444, d_tracer,
9002 tr, &show_traces_fops);
9004 trace_create_file("current_tracer", 0644, d_tracer,
9005 tr, &set_tracer_fops);
9007 trace_create_file("tracing_cpumask", 0644, d_tracer,
9008 tr, &tracing_cpumask_fops);
9010 trace_create_file("trace_options", 0644, d_tracer,
9011 tr, &tracing_iter_fops);
9013 trace_create_file("trace", 0644, d_tracer,
9016 trace_create_file("trace_pipe", 0444, d_tracer,
9017 tr, &tracing_pipe_fops);
9019 trace_create_file("buffer_size_kb", 0644, d_tracer,
9020 tr, &tracing_entries_fops);
9022 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9023 tr, &tracing_total_entries_fops);
9025 trace_create_file("free_buffer", 0200, d_tracer,
9026 tr, &tracing_free_buffer_fops);
9028 trace_create_file("trace_marker", 0220, d_tracer,
9029 tr, &tracing_mark_fops);
9031 file = __find_event_file(tr, "ftrace", "print");
9032 if (file && file->dir)
9033 trace_create_file("trigger", 0644, file->dir, file,
9034 &event_trigger_fops);
9035 tr->trace_marker_file = file;
9037 trace_create_file("trace_marker_raw", 0220, d_tracer,
9038 tr, &tracing_mark_raw_fops);
9040 trace_create_file("trace_clock", 0644, d_tracer, tr,
9043 trace_create_file("tracing_on", 0644, d_tracer,
9044 tr, &rb_simple_fops);
9046 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9047 &trace_time_stamp_mode_fops);
9049 tr->buffer_percent = 50;
9051 trace_create_file("buffer_percent", 0444, d_tracer,
9052 tr, &buffer_percent_fops);
9054 create_trace_options_dir(tr);
9056 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
9057 trace_create_maxlat_file(tr, d_tracer);
9060 if (ftrace_create_function_files(tr, d_tracer))
9061 MEM_FAIL(1, "Could not allocate function filter files");
9063 #ifdef CONFIG_TRACER_SNAPSHOT
9064 trace_create_file("snapshot", 0644, d_tracer,
9065 tr, &snapshot_fops);
9068 trace_create_file("error_log", 0644, d_tracer,
9069 tr, &tracing_err_log_fops);
9071 for_each_tracing_cpu(cpu)
9072 tracing_init_tracefs_percpu(tr, cpu);
9074 ftrace_init_tracefs(tr, d_tracer);
9077 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9079 struct vfsmount *mnt;
9080 struct file_system_type *type;
9083 * To maintain backward compatibility for tools that mount
9084 * debugfs to get to the tracing facility, tracefs is automatically
9085 * mounted to the debugfs/tracing directory.
9087 type = get_fs_type("tracefs");
9090 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9091 put_filesystem(type);
9100 * tracing_init_dentry - initialize top level trace array
9102 * This is called when creating files or directories in the tracing
9103 * directory. It is called via fs_initcall() by any of the boot up code
9104 * and expects to return the dentry of the top level tracing directory.
9106 int tracing_init_dentry(void)
9108 struct trace_array *tr = &global_trace;
9110 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9111 pr_warn("Tracing disabled due to lockdown\n");
9115 /* The top level trace array uses NULL as parent */
9119 if (WARN_ON(!tracefs_initialized()))
9123 * As there may still be users that expect the tracing
9124 * files to exist in debugfs/tracing, we must automount
9125 * the tracefs file system there, so older tools still
9126 	 * work with the newer kernel.
9128 tr->dir = debugfs_create_automount("tracing", NULL,
9129 trace_automount, NULL);
9134 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9135 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9137 static struct workqueue_struct *eval_map_wq __initdata;
9138 static struct work_struct eval_map_work __initdata;
9140 static void __init eval_map_work_func(struct work_struct *work)
9144 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9145 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9148 static int __init trace_eval_init(void)
9150 INIT_WORK(&eval_map_work, eval_map_work_func);
9152 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9154 pr_err("Unable to allocate eval_map_wq\n");
9156 eval_map_work_func(&eval_map_work);
9160 queue_work(eval_map_wq, &eval_map_work);
9164 static int __init trace_eval_sync(void)
9166 /* Make sure the eval map updates are finished */
9168 destroy_workqueue(eval_map_wq);
9172 late_initcall_sync(trace_eval_sync);
9175 #ifdef CONFIG_MODULES
9176 static void trace_module_add_evals(struct module *mod)
9178 if (!mod->num_trace_evals)
9182 * Modules with bad taint do not have events created, do
9183 * not bother with enums either.
9185 if (trace_module_has_bad_taint(mod))
9188 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9191 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9192 static void trace_module_remove_evals(struct module *mod)
9194 union trace_eval_map_item *map;
9195 union trace_eval_map_item **last = &trace_eval_maps;
9197 if (!mod->num_trace_evals)
9200 mutex_lock(&trace_eval_mutex);
9202 map = trace_eval_maps;
9205 if (map->head.mod == mod)
9207 map = trace_eval_jmp_to_tail(map);
9208 last = &map->tail.next;
9209 map = map->tail.next;
9214 *last = trace_eval_jmp_to_tail(map)->tail.next;
9217 mutex_unlock(&trace_eval_mutex);
9220 static inline void trace_module_remove_evals(struct module *mod) { }
9221 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9223 static int trace_module_notify(struct notifier_block *self,
9224 unsigned long val, void *data)
9226 struct module *mod = data;
9229 case MODULE_STATE_COMING:
9230 trace_module_add_evals(mod);
9232 case MODULE_STATE_GOING:
9233 trace_module_remove_evals(mod);
9240 static struct notifier_block trace_module_nb = {
9241 .notifier_call = trace_module_notify,
9244 #endif /* CONFIG_MODULES */
9246 static __init int tracer_init_tracefs(void)
9250 trace_access_lock_init();
9252 ret = tracing_init_dentry();
9258 init_tracer_tracefs(&global_trace, NULL);
9259 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9261 trace_create_file("tracing_thresh", 0644, NULL,
9262 &global_trace, &tracing_thresh_fops);
9264 trace_create_file("README", 0444, NULL,
9265 NULL, &tracing_readme_fops);
9267 trace_create_file("saved_cmdlines", 0444, NULL,
9268 NULL, &tracing_saved_cmdlines_fops);
9270 trace_create_file("saved_cmdlines_size", 0644, NULL,
9271 NULL, &tracing_saved_cmdlines_size_fops);
9273 trace_create_file("saved_tgids", 0444, NULL,
9274 NULL, &tracing_saved_tgids_fops);
9278 trace_create_eval_file(NULL);
9280 #ifdef CONFIG_MODULES
9281 register_module_notifier(&trace_module_nb);
9284 #ifdef CONFIG_DYNAMIC_FTRACE
9285 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9286 NULL, &tracing_dyn_info_fops);
9289 create_trace_instances(NULL);
9291 update_tracer_options(&global_trace);
9296 static int trace_panic_handler(struct notifier_block *this,
9297 unsigned long event, void *unused)
9299 if (ftrace_dump_on_oops)
9300 ftrace_dump(ftrace_dump_on_oops);
9304 static struct notifier_block trace_panic_notifier = {
9305 .notifier_call = trace_panic_handler,
9307 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9310 static int trace_die_handler(struct notifier_block *self,
9316 if (ftrace_dump_on_oops)
9317 ftrace_dump(ftrace_dump_on_oops);
9325 static struct notifier_block trace_die_notifier = {
9326 .notifier_call = trace_die_handler,
9331 * printk is set to max of 1024, we really don't need it that big.
9332 * Nothing should be printing 1000 characters anyway.
9334 #define TRACE_MAX_PRINT 1000
9337 * Define here KERN_TRACE so that we have one place to modify
9338 * it if we decide to change what log level the ftrace dump
9341 #define KERN_TRACE KERN_EMERG
9344 trace_printk_seq(struct trace_seq *s)
9346 /* Probably should print a warning here. */
9347 if (s->seq.len >= TRACE_MAX_PRINT)
9348 s->seq.len = TRACE_MAX_PRINT;
9351 * More paranoid code. Although the buffer size is set to
9352 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9353 * an extra layer of protection.
9355 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9356 s->seq.len = s->seq.size - 1;
9358 /* should be zero ended, but we are paranoid. */
9359 s->buffer[s->seq.len] = 0;
9361 printk(KERN_TRACE "%s", s->buffer);
9366 void trace_init_global_iter(struct trace_iterator *iter)
9368 iter->tr = &global_trace;
9369 iter->trace = iter->tr->current_trace;
9370 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9371 iter->array_buffer = &global_trace.array_buffer;
9373 if (iter->trace && iter->trace->open)
9374 iter->trace->open(iter);
9376 /* Annotate start of buffers if we had overruns */
9377 if (ring_buffer_overruns(iter->array_buffer->buffer))
9378 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9380 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9381 if (trace_clocks[iter->tr->clock_id].in_ns)
9382 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9385 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9387 /* use static because iter can be a bit big for the stack */
9388 static struct trace_iterator iter;
9389 static atomic_t dump_running;
9390 struct trace_array *tr = &global_trace;
9391 unsigned int old_userobj;
9392 unsigned long flags;
9395 /* Only allow one dump user at a time. */
9396 if (atomic_inc_return(&dump_running) != 1) {
9397 atomic_dec(&dump_running);
9402 * Always turn off tracing when we dump.
9403 * We don't need to show trace output of what happens
9404 * between multiple crashes.
9406 * If the user does a sysrq-z, then they can re-enable
9407 * tracing with echo 1 > tracing_on.
9411 local_irq_save(flags);
9412 printk_nmi_direct_enter();
9414 /* Simulate the iterator */
9415 trace_init_global_iter(&iter);
9416 /* Can not use kmalloc for iter.temp and iter.fmt */
9417 iter.temp = static_temp_buf;
9418 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9419 iter.fmt = static_fmt_buf;
9420 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9422 for_each_tracing_cpu(cpu) {
9423 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9426 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9428 /* don't look at user memory in panic mode */
9429 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9431 switch (oops_dump_mode) {
9433 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9436 iter.cpu_file = raw_smp_processor_id();
9441 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9442 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9445 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9447 /* Did function tracer already get disabled? */
9448 if (ftrace_is_dead()) {
9449 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9450 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9454 	 * We need to stop all tracing on all CPUs to read
9455 	 * the next buffer. This is a bit expensive, but is
9456 	 * not done often. We fill in all that we can read,
9457 	 * and then release the locks again.
9460 while (!trace_empty(&iter)) {
9463 printk(KERN_TRACE "---------------------------------\n");
9467 trace_iterator_reset(&iter);
9468 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9470 if (trace_find_next_entry_inc(&iter) != NULL) {
9473 ret = print_trace_line(&iter);
9474 if (ret != TRACE_TYPE_NO_CONSUME)
9475 trace_consume(&iter);
9477 touch_nmi_watchdog();
9479 trace_printk_seq(&iter.seq);
9483 printk(KERN_TRACE " (ftrace buffer empty)\n");
9485 printk(KERN_TRACE "---------------------------------\n");
9488 tr->trace_flags |= old_userobj;
9490 for_each_tracing_cpu(cpu) {
9491 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9493 atomic_dec(&dump_running);
9494 printk_nmi_direct_exit();
9495 local_irq_restore(flags);
9497 EXPORT_SYMBOL_GPL(ftrace_dump);
9499 #define WRITE_BUFSIZE 4096
9501 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9502 size_t count, loff_t *ppos,
9503 int (*createfn)(const char *))
9505 char *kbuf, *buf, *tmp;
9510 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9514 while (done < count) {
9515 size = count - done;
9517 if (size >= WRITE_BUFSIZE)
9518 size = WRITE_BUFSIZE - 1;
9520 if (copy_from_user(kbuf, buffer + done, size)) {
9527 tmp = strchr(buf, '\n');
9530 size = tmp - buf + 1;
9533 if (done + size < count) {
9536 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9537 pr_warn("Line length is too long: Should be less than %d\n",
9545 /* Remove comments */
9546 tmp = strchr(buf, '#');
9551 ret = createfn(buf);
9556 } while (done < count);
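/*
 * Illustrative sketch (hypothetical names) of how a caller such as the
 * kprobe or uprobe event files uses trace_parse_run_command(): a write
 * handler only needs to supply a callback that handles one parsed line
 * at a time.
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		(parse one command line and act on it)
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *buffer,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buffer, count, ppos,
 *					       my_create_cmd);
 *	}
 */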
9566 __init static int tracer_alloc_buffers(void)
9572 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9573 pr_warn("Tracing disabled due to lockdown\n");
9578 * Make sure we don't accidentally add more trace options
9579 * than we have bits for.
9581 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9583 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9586 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9587 goto out_free_buffer_mask;
9589 /* Only allocate trace_printk buffers if a trace_printk exists */
9590 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9591 /* Must be called before global_trace.buffer is allocated */
9592 trace_printk_init_buffers();
9594 /* To save memory, keep the ring buffer size to its minimum */
9595 if (ring_buffer_expanded)
9596 ring_buf_size = trace_buf_size;
9600 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9601 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9603 raw_spin_lock_init(&global_trace.start_lock);
9606 * The prepare callbacks allocates some memory for the ring buffer. We
9607 * don't free the buffer if the CPU goes down. If we were to free
9608 * the buffer, then the user would lose any trace that was in the
9609 * buffer. The memory will be removed once the "instance" is removed.
9611 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9612 "trace/RB:preapre", trace_rb_cpu_prepare,
9615 goto out_free_cpumask;
9616 /* Used for event triggers */
9618 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9620 goto out_rm_hp_state;
9622 if (trace_create_savedcmd() < 0)
9623 goto out_free_temp_buffer;
9625 /* TODO: make the number of buffers hot pluggable with CPUS */
9626 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9627 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9628 goto out_free_savedcmd;
9631 if (global_trace.buffer_disabled)
9634 if (trace_boot_clock) {
9635 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9637 pr_warn("Trace clock %s not defined, going back to default\n",
9642 * register_tracer() might reference current_trace, so it
9643 * needs to be set before we register anything. This is
9644 * just a bootstrap of current_trace anyway.
9646 global_trace.current_trace = &nop_trace;
9648 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9650 ftrace_init_global_array_ops(&global_trace);
9652 init_trace_flags_index(&global_trace);
9654 register_tracer(&nop_trace);
9656 /* Function tracing may start here (via kernel command line) */
9657 init_function_trace();
9659 /* All seems OK, enable tracing */
9660 tracing_disabled = 0;
9662 atomic_notifier_chain_register(&panic_notifier_list,
9663 &trace_panic_notifier);
9665 register_die_notifier(&trace_die_notifier);
9667 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9669 INIT_LIST_HEAD(&global_trace.systems);
9670 INIT_LIST_HEAD(&global_trace.events);
9671 INIT_LIST_HEAD(&global_trace.hist_vars);
9672 INIT_LIST_HEAD(&global_trace.err_log);
9673 list_add(&global_trace.list, &ftrace_trace_arrays);
9675 apply_trace_boot_options();
9677 register_snapshot_cmd();
9682 free_saved_cmdlines_buffer(savedcmd);
9683 out_free_temp_buffer:
9684 ring_buffer_free(temp_buffer);
9686 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9688 free_cpumask_var(global_trace.tracing_cpumask);
9689 out_free_buffer_mask:
9690 free_cpumask_var(tracing_buffer_mask);
9695 void __init early_trace_init(void)
9697 if (tracepoint_printk) {
9698 tracepoint_print_iter =
9699 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9700 if (MEM_FAIL(!tracepoint_print_iter,
9701 "Failed to allocate trace iterator\n"))
9702 tracepoint_printk = 0;
9704 static_key_enable(&tracepoint_printk_key.key);
9706 tracer_alloc_buffers();
9709 void __init trace_init(void)
9714 __init static int clear_boot_tracer(void)
9717 * The default tracer at boot buffer is an init section.
9718 * This function is called in lateinit. If we did not
9719 * find the boot tracer, then clear it out, to prevent
9720 * later registration from accessing the buffer that is
9721 * about to be freed.
9723 if (!default_bootup_tracer)
9726 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9727 default_bootup_tracer);
9728 default_bootup_tracer = NULL;
9733 fs_initcall(tracer_init_tracefs);
9734 late_initcall_sync(clear_boot_tracer);
9736 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9737 __init static int tracing_set_default_clock(void)
9739 /* sched_clock_stable() is determined in late_initcall */
9740 if (!trace_boot_clock && !sched_clock_stable()) {
9741 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9742 pr_warn("Can not set tracing clock due to lockdown\n");
9747 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9748 "If you want to keep using the local clock, then add:\n"
9749 " \"trace_clock=local\"\n"
9750 "on the kernel command line\n");
9751 tracing_set_clock(&global_trace, "global");
9756 late_initcall_sync(tracing_set_default_clock);