// SPDX-License-Identifier: GPL-2.0
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace_output.h"
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
bool ring_buffer_expanded;

 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
static bool __read_mostly tracing_selftest_running;

 * If boot-time tracing (tracers/events set up via the kernel cmdline)
 * is running, we do not want to run the selftests.
bool __read_mostly tracing_selftest_disabled;
#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
        if (!tracing_selftest_disabled) {
                tracing_selftest_disabled = true;
                pr_info("Ftrace startup test is disabled due to %s\n", reason);

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)

 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        unsigned long length;

union trace_eval_map_item;

struct trace_eval_map_tail {
         * "end" is first and points to NULL, as it must be different
         * from "mod" or "eval_string"
        union trace_eval_map_item *next;
        const char *end;        /* points to NULL */

static DEFINE_MUTEX(trace_eval_mutex);

 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
union trace_eval_map_item {
        struct trace_eval_map map;
        struct trace_eval_map_head head;
        struct trace_eval_map_tail tail;

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
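/*
 * For example (illustrative sketch based only on the description above),
 * a saved array of three eval maps would be laid out as:
 *
 *      trace_eval_maps -> [ head ][ map 0 ][ map 1 ][ map 2 ][ tail ]
 *
 * where head.length == 3, head.mod points to the owning module (or is
 * NULL when built in), and tail.next points to the next saved array of
 * eval_map items (or NULL for the last one).
 */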
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
                                   unsigned int trace_ctx);

#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static int __init set_cmdline_ftrace(char *str)
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
        if (*str++ != '=' || !*str || !strcmp("1", str)) {
                ftrace_dump_on_oops = DUMP_ALL;
        if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
__setup("alloc_snapshot", boot_alloc_snapshot);

static int __init boot_snapshot(char *str)
        snapshot_at_boot = true;
        boot_alloc_snapshot(str);
__setup("ftrace_boot_snapshot", boot_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
        /* Ignore the "tp_printk_stop_on_boot" param */
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
        tracepoint_printk_stop_on_boot = true;
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
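/*
 * Example (illustrative, not from the original source): the boot parameters
 * registered above can be combined on the kernel command line, e.g.:
 *
 *      ftrace=function_graph trace_options=sym-addr trace_clock=global
 *      alloc_snapshot ftrace_dump_on_oops=orig_cpu tp_printk
 *
 * This selects the boot-up tracer, its options and clock, allocates the
 * snapshot buffer, dumps only the oopsing CPU's buffer on an oops, and
 * pipes tracepoints to printk.
 */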
unsigned long long ns2usecs(u64 nsec)

trace_process_export(struct trace_export *export,
                     struct ring_buffer_event *event, int flag)
        struct trace_entry *entry;
        unsigned int size = 0;

        if (export->flags & flag) {
                entry = ring_buffer_event_data(event);
                size = ring_buffer_event_length(event);
                export->write(export, entry, size);

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_inc(&trace_function_exports_enabled);
        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_inc(&trace_event_exports_enabled);
        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_inc(&trace_marker_exports_enabled);

static inline void ftrace_exports_disable(struct trace_export *export)
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_dec(&trace_function_exports_enabled);
        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_dec(&trace_event_exports_enabled);
        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_dec(&trace_marker_exports_enabled);

static void ftrace_exports(struct ring_buffer_event *event, int flag)
        struct trace_export *export;

        preempt_disable_notrace();
        export = rcu_dereference_raw_check(ftrace_exports_list);
        trace_process_export(export, event, flag);
        export = rcu_dereference_raw_check(export->next);
        preempt_enable_notrace();
add_trace_export(struct trace_export **list, struct trace_export *export)
        rcu_assign_pointer(export->next, *list);
         * We are adding the export to the list, but another
         * CPU might be walking that list. We need to make sure
         * the export->next pointer is valid before another CPU sees
         * the export pointer included in the list.
        rcu_assign_pointer(*list, export);

rm_trace_export(struct trace_export **list, struct trace_export *export)
        struct trace_export **p;

        for (p = list; *p != NULL; p = &(*p)->next)
        rcu_assign_pointer(*p, (*p)->next);

add_ftrace_export(struct trace_export **list, struct trace_export *export)
        ftrace_exports_enable(export);
        add_trace_export(list, export);

rm_ftrace_export(struct trace_export **list, struct trace_export *export)
        ret = rm_trace_export(list, export);
        ftrace_exports_disable(export);

int register_ftrace_export(struct trace_export *export)
        if (WARN_ON_ONCE(!export->write))
        mutex_lock(&ftrace_export_lock);
        add_ftrace_export(&ftrace_exports_list, export);
        mutex_unlock(&ftrace_export_lock);
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
        mutex_lock(&ftrace_export_lock);
        ret = rm_ftrace_export(&ftrace_exports_list, export);
        mutex_unlock(&ftrace_export_lock);
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
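/*
 * Example (illustrative sketch, not part of this file): a module can mirror
 * trace data to its own sink by registering a trace_export. The callback
 * signature follows struct trace_export in <linux/trace.h>; the names below
 * are hypothetical.
 *
 *      static void my_export_write(struct trace_export *export,
 *                                   const void *entry, unsigned int size)
 *      {
 *              pr_debug("exporting %u bytes of trace data\n", size);
 *      }
 *
 *      static struct trace_export my_export = {
 *              .write = my_export_write,
 *              .flags = TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
 *      };
 *
 *      register_ftrace_export(&my_export);
 *      ...
 *      unregister_ftrace_export(&my_export);
 */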
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,

LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
        struct trace_array *tr;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
        mutex_unlock(&trace_types_lock);

static void __trace_array_put(struct trace_array *this_tr)
        WARN_ON(!this_tr->ref);

 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
void trace_array_put(struct trace_array *this_tr)
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
EXPORT_SYMBOL_GPL(trace_array_put);
int tracing_check_open_get_tr(struct trace_array *tr)
        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (tracing_disabled)
        if (tr && trace_array_get(tr) < 0)

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);

 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
        return trace_pid_list_is_set(filtered_pids, search_pid);

 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                       struct trace_pid_list *filtered_no_pids,
                       struct task_struct *task)
         * If filtered_no_pids is not empty, and the task's pid is listed
         * in filtered_no_pids, then return true.
         * Otherwise, if filtered_pids is empty, that means we can
         * trace all tasks. If it has content, then only trace pids
         * within filtered_pids.
        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
                trace_find_filtered_pid(filtered_no_pids, task->pid));

 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
        /* For forks, we only add if the forking task is listed */
        if (!trace_find_filtered_pid(pid_list, self->pid))
        /* "self" is set for forks, and NULL for exits */
        trace_pid_list_set(pid_list, task->pid);
        trace_pid_list_clear(pid_list, task->pid);

 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
        long pid = (unsigned long)v;

        /* pid already is +1 of the actual previous bit */
        if (trace_pid_list_next(pid_list, pid, &next) < 0)

        /* Return pid + 1 to allow zero to be represented */
        return (void *)(pid + 1);

 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
        if (trace_pid_list_first(pid_list, &first) < 0)

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))

 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
int trace_pid_show(struct seq_file *m, void *v)
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
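/*
 * Example (illustrative sketch): the helpers above are meant to back a
 * seq_file interface for a pid list, roughly:
 *
 *      static void *p_start(struct seq_file *m, loff_t *pos)
 *      {
 *              return trace_pid_start(my_pid_list, pos);
 *      }
 *
 *      static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return trace_pid_next(my_pid_list, v, pos);
 *      }
 *
 *      static const struct seq_operations my_pid_sops = {
 *              .start = p_start,
 *              .next  = p_next,
 *              .stop  = p_stop,
 *              .show  = trace_pid_show,
 *      };
 *
 * where my_pid_list and p_stop() are hypothetical names; the real users
 * (e.g. set_ftrace_pid, set_event_pid) wire these up with their own
 * locking in ->start()/->stop().
 */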
/* 128 should be much more than enough */
#define PID_BUF_SIZE 127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
        struct trace_pid_list *pid_list;
        struct trace_parser parser;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))

         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
        pid_list = trace_pid_list_alloc();
                trace_parser_put(&parser);

        /* copy the current bits to the new max */
        ret = trace_pid_list_first(filtered_pids, &pid);
                trace_pid_list_set(pid_list, pid);
                ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                if (kstrtoul(parser.buffer, 0, &val))
                if (trace_pid_list_set(pid_list, pid) < 0) {
                trace_parser_clear(&parser);
        trace_parser_put(&parser);
                trace_pid_list_free(pid_list);

                /* Cleared the list of pids */
                trace_pid_list_free(pid_list);

        *new_pid_list = pid_list;
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
        /* Early boot up does not have a buffer yet */
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

u64 ftrace_now(int cpu)
        return buffer_ftrace_now(&global_trace.array_buffer, cpu);

 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
int tracing_is_enabled(void)
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
        return !global_trace.buffer_disabled;

 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

 * trace_types_lock is used to protect the trace_types list.
DEFINE_MUTEX(trace_types_lock);
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
                /* gain it for accessing a cpu ring buffer. */
                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);
                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));

static inline void trace_access_unlock(int cpu)
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);

static inline void trace_access_lock_init(void)
        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
        mutex_lock(&access_lock);

static inline void trace_access_unlock(int cpu)
        mutex_unlock(&access_lock);

static inline void trace_access_lock_init(void)

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned int trace_ctx,
                                 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs);

static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
                                        unsigned int trace_ctx,
                                        int skip, struct pt_regs *regs)
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned long trace_ctx,
                                      int skip, struct pt_regs *regs)
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned int trace_ctx)
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, type, trace_ctx);

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                            unsigned int trace_ctx)
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
                trace_event_setup(event, type, trace_ctx);

void tracer_tracing_on(struct trace_array *tr)
        if (tr->array_buffer.buffer)
                ring_buffer_record_on(tr->array_buffer.buffer);
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */

 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
void tracing_on(void)
        tracer_tracing_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
                /* ring_buffer_unlock_commit() enables preemption */
                preempt_enable_notrace();
                ring_buffer_unlock_commit(buffer, event);

 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
int __trace_puts(unsigned long ip, const char *str, int size)
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned int trace_ctx;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        if (unlikely(tracing_selftest_running || tracing_disabled))

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
        entry = ring_buffer_event_data(event);

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
        ring_buffer_nest_end(buffer);
EXPORT_SYMBOL_GPL(__trace_puts);
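/*
 * Example (illustrative): callers normally use the trace_puts() macro
 * rather than calling __trace_puts() directly, e.g.:
 *
 *      trace_puts("reached the interesting branch\n");
 *
 * which records the literal string (and the caller's IP) into the trace
 * buffer with less overhead than a trace_printk() with format arguments.
 */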
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
int __trace_bputs(unsigned long ip, const char *str)
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
        if (unlikely(tracing_selftest_running || tracing_disabled))

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;

        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
        entry = ring_buffer_event_data(event);

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

        ring_buffer_nest_end(buffer);
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
        internal_trace_puts("*** snapshot is being ignored        ***\n");

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);

void tracing_snapshot_instance(struct trace_array *tr)
        tracing_snapshot_instance_cond(tr, NULL);

 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
void tracing_snapshot(void)
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
EXPORT_SYMBOL_GPL(tracing_snapshot);
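/*
 * Example (illustrative sketch): a driver that wants to freeze the trace
 * around a rare event could do, once the snapshot buffer is allocated:
 *
 *      tracing_snapshot_alloc();       // may sleep, do this at init time
 *      ...
 *      if (rare_error_condition)       // hypothetical condition
 *              tracing_snapshot();     // swap the live buffer into the snapshot
 *
 * The snapshot can then be read from the "snapshot" file in tracefs while
 * live tracing continues in the main buffer.
 */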
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
        tracing_snapshot_instance_cond(tr, cond_data);
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
void *tracing_cond_snapshot_data(struct trace_array *tr)
        void *cond_data = NULL;

        arch_spin_lock(&tr->max_lock);
        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;
        arch_spin_unlock(&tr->max_lock);
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
                                        struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
        if (!tr->allocated_snapshot) {
                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
static void free_snapshot(struct trace_array *tr)
         * We don't free the ring buffer; instead, we resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
int tracing_alloc_snapshot(void)
        struct trace_array *tr = &global_trace;

        ret = tracing_alloc_snapshot_instance(tr);
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
void tracing_snapshot_alloc(void)
        ret = tracing_alloc_snapshot();
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
                                 cond_update_fn_t update)
        struct cond_snapshot *cond_snapshot;

        cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
        cond_snapshot->cond_data = cond_data;
        cond_snapshot->update = update;

        mutex_lock(&trace_types_lock);

        ret = tracing_alloc_snapshot_instance(tr);
        if (tr->current_trace->use_max_tr) {

         * The cond_snapshot can only change to NULL without the
         * trace_types_lock. We don't care if we race with it going
         * to NULL, but we want to make sure that it's not set to
         * something other than NULL when we get here, which we can
         * do safely with only holding the trace_types_lock and not
         * having to take the max_lock.
        if (tr->cond_snapshot) {

        arch_spin_lock(&tr->max_lock);
        tr->cond_snapshot = cond_snapshot;
        arch_spin_unlock(&tr->max_lock);

        mutex_unlock(&trace_types_lock);

        mutex_unlock(&trace_types_lock);
        kfree(cond_snapshot);
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
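/*
 * Example (illustrative sketch): a hypothetical user of the conditional
 * snapshot API. The update() callback decides whether a snapshot is taken,
 * and the cond_data can later be fetched with tracing_cond_snapshot_data():
 *
 *      static bool my_update(struct trace_array *tr, void *cond_data)
 *      {
 *              struct my_ctx *ctx = cond_data;         // hypothetical type
 *
 *              return ctx->latency > ctx->threshold;
 *      }
 *
 *      tracing_snapshot_cond_enable(tr, &my_ctx, my_update);
 *      ...
 *      tracing_snapshot_cond(tr, &my_ctx);     // snapshots only if my_update() returns true
 *      ...
 *      tracing_snapshot_cond_disable(tr);
 */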
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr: The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
int tracing_snapshot_cond_disable(struct trace_array *tr)
        arch_spin_lock(&tr->max_lock);

        if (!tr->cond_snapshot)
        kfree(tr->cond_snapshot);
        tr->cond_snapshot = NULL;

        arch_spin_unlock(&tr->max_lock);
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);

void tracing_snapshot(void)
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
        WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
        if (tr->array_buffer.buffer)
                ring_buffer_record_off(tr->array_buffer.buffer);
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */

 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
void tracing_off(void)
        tracer_tracing_off(&global_trace);
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
        if (__disable_trace_on_warning) {
                trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
                                       "Disabling tracing due to warning\n");

 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
bool tracer_tracing_is_on(struct trace_array *tr)
        if (tr->array_buffer.buffer)
                return ring_buffer_record_is_on(tr->array_buffer.buffer);
        return !tr->buffer_disabled;

 * tracing_is_on - show state of ring buffers enabled
int tracing_is_on(void)
        return tracer_tracing_is_on(&global_trace);
EXPORT_SYMBOL_GPL(tracing_is_on);
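/*
 * Example (illustrative): tracing_off() is handy for freezing the trace at
 * the moment a problem is detected, so the events leading up to it are not
 * overwritten, e.g. in a driver:
 *
 *      if (status & MY_FATAL_ERROR) {          // hypothetical condition
 *              trace_printk("fatal error, freezing trace\n");
 *              tracing_off();
 *      }
 *
 * The buffers can later be inspected from tracefs and re-enabled with
 * tracing_on() or by writing 1 to the "tracing_on" file.
 */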
static int __init set_buf_size(char *str)
        unsigned long buf_size;

        buf_size = memparse(str, &str);
         * nr_entries can not be zero and the startup
         * tests require some buffer space. Therefore
         * ensure we have at least 4096 bytes of buffer.
        trace_buf_size = max(4096UL, buf_size);
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
        unsigned long threshold;

        ret = kstrtoul(str, 0, &threshold);
        tracing_thresh = threshold * 1000;
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
        return nsecs / 1000;

 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {

        int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },

bool trace_clock_in_ns(struct trace_array *tr)
        if (trace_clocks[tr->clock_id].in_ns)

 * trace_parser_get_init - gets the buffer for trace parser
int trace_parser_get_init(struct trace_parser *parser, int size)
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)

        parser->size = size;

 * trace_parser_put - frees the buffer for trace parser
void trace_parser_put(struct trace_parser *parser)
        kfree(parser->buffer);
        parser->buffer = NULL;

 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
        trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);

         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);

                /* only spaces were written */
                if (isspace(ch) || !ch) {

        /* read the non-space input */
        while (cnt && !isspace(ch) && ch) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;

                ret = get_user(ch, ubuf++);

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch) || !ch) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
                /* Make sure the parsed string always terminates with '\0'. */
                parser->buffer[parser->idx] = 0;
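/*
 * Example (illustrative sketch): a tracefs write handler can use the parser
 * helpers above to split user input into whitespace-separated tokens:
 *
 *      struct trace_parser parser;
 *      loff_t pos;
 *
 *      if (trace_parser_get_init(&parser, MY_BUF_SIZE))   // MY_BUF_SIZE is hypothetical
 *              return -ENOMEM;
 *
 *      while (cnt > 0) {
 *              pos = 0;
 *              ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *              if (ret < 0 || !trace_parser_loaded(&parser))
 *                      break;
 *              // parser.buffer now holds one NUL-terminated token
 *              ubuf += ret;
 *              cnt -= ret;
 *      }
 *      trace_parser_put(&parser);
 *
 * This is the same pattern trace_pid_write() above follows.
 */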
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
        if (trace_seq_used(s) <= s->seq.readpos)

        len = trace_seq_used(s) - s->seq.readpos;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;

unsigned long __read_mostly tracing_thresh;
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
        struct trace_array *tr = container_of(work, struct trace_array,
        fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
        struct trace_array *tr = container_of(iwork, struct trace_array,
        queue_work(fsnotify_wq, &tr->fsnotify_work);

static void trace_create_maxlat_file(struct trace_array *tr,
                                     struct dentry *d_tracer)
        INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
        init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
        tr->d_max_latency = trace_create_file("tracing_max_latency",
                                              d_tracer, &tr->max_latency,
                                              &tracing_max_lat_fops);

__init static int latency_fsnotify_init(void)
        fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
                                      WQ_UNBOUND | WQ_HIGHPRI, 0);
                pr_err("Unable to allocate tr_max_lat_wq\n");

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
         * We cannot call queue_work(&tr->fsnotify_work) from here because it's
         * possible that we are called from __schedule() or do_idle(), which
         * could cause a deadlock.
        irq_work_queue(&tr->fsnotify_irqwork);
#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
        || defined(CONFIG_OSNOISE_TRACER)

#define trace_create_maxlat_file(tr, d_tracer)                          \
        trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,     \
                          d_tracer, &tr->max_latency, &tracing_max_lat_fops)

#define trace_create_maxlat_file(tr, d_tracer)   do { } while (0)

#ifdef CONFIG_TRACER_MAX_TRACE
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (This way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        struct array_buffer *trace_buf = &tr->array_buffer;
        struct array_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
        max_data->uid = current_uid();
        max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
        latency_fsnotify(tr);

 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from array_buffer */
        if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
                ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
        if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
        swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

        __update_max_tr(tr, tsk, cpu);

        arch_spin_unlock(&tr->max_lock);

 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))

        return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
        struct list_head list;
        struct tracer *type;

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
        struct trace_selftests *selftest;

        selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
        selftest->type = type;
        list_add(&selftest->list, &postponed_selftests);

static int run_tracer_selftest(struct tracer *type)
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;

        if (!type->selftest || tracing_selftest_disabled)

         * If a tracer registers early in boot up (before scheduling is
         * initialized and such), then do not run its selftests yet.
         * Instead, run it a little later in the boot process.
        if (!selftests_can_run)
                return save_selftest(type);

        if (!tracing_is_on()) {
                pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",

         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
        tracing_reset_online_cpus(&tr->array_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);

        printk(KERN_CONT "PASSED\n");

static __init int init_trace_selftests(void)
        struct trace_selftests *p, *n;
        struct tracer *t, **last;

        selftests_can_run = true;

        mutex_lock(&trace_types_lock);

        if (list_empty(&postponed_selftests))

        pr_info("Running postponed tracer tests:\n");

        tracing_selftest_running = true;
        list_for_each_entry_safe(p, n, &postponed_selftests, list) {
                /* This loop can take minutes when sanitizers are enabled, so
                 * let's make sure we allow RCU processing.
                 */
                ret = run_tracer_selftest(p->type);
                /* If the test fails, then warn and remove from available_tracers */
                        WARN(1, "tracer: %s failed selftest, disabling\n",
                        last = &trace_types;
                        for (t = trace_types; t; t = t->next) {
        tracing_selftest_running = false;

        mutex_unlock(&trace_types_lock);
core_initcall(init_trace_selftests);

static inline int run_tracer_selftest(struct tracer *type)
#endif /* CONFIG_FTRACE_STARTUP_TEST */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
int __init register_tracer(struct tracer *type)
                pr_info("Tracer must have a name\n");

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);

        if (security_locked_down(LOCKDOWN_TRACEFS)) {
                pr_warn("Can not register tracer %s due to lockdown\n",

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        pr_info("Tracer %s already registered\n",

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
                /* allocate a dummy tracer_flags */
                type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
                type->flags->val = 0;
                type->flags->opts = dummy_tracer_opt;
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;

        ret = run_tracer_selftest(type);

        type->next = trace_types;
        add_tracer_options(&global_trace, type);

        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;

        apply_trace_boot_options();

        /* disable other selftests, since this will break it. */
        disable_tracing_selftest("running a tracer");
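/*
 * Example (illustrative sketch): a minimal built-in tracer registration.
 * The field names follow struct tracer in kernel/trace/trace.h; "mytrace"
 * and its callbacks are hypothetical.
 *
 *      static int mytrace_init(struct trace_array *tr)
 *      {
 *              return 0;       // start whatever hooks the tracer needs
 *      }
 *
 *      static void mytrace_reset(struct trace_array *tr)
 *      {
 *      }
 *
 *      static struct tracer mytrace __read_mostly = {
 *              .name   = "mytrace",
 *              .init   = mytrace_init,
 *              .reset  = mytrace_reset,
 *      };
 *
 *      static __init int init_mytrace(void)
 *      {
 *              return register_tracer(&mytrace);
 *      }
 *      core_initcall(init_mytrace);
 */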
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
        struct trace_buffer *buffer = buf->buffer;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);

void tracing_reset_online_cpus(struct array_buffer *buf)
        struct trace_buffer *buffer = buf->buffer;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        ring_buffer_reset_online_cpus(buffer);

        ring_buffer_record_enable(buffer);

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (!tr->clear_trace)
                tr->clear_trace = false;
                tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);

 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
        unsigned *map_cmdline_to_pid;
        unsigned cmdline_num;
        char *saved_cmdlines;
static struct saved_cmdlines_buffer *savedcmd;

static inline char *get_saved_cmdlines(int idx)
        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];

static inline void set_cmdline(int idx, const char *cmdline)
        strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);

static int allocate_cmdlines_buffer(unsigned int val,
                                    struct saved_cmdlines_buffer *s)
        s->map_cmdline_to_pid = kmalloc_array(val,
                                              sizeof(*s->map_cmdline_to_pid),
        if (!s->map_cmdline_to_pid)

        s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
        if (!s->saved_cmdlines) {
                kfree(s->map_cmdline_to_pid);

        s->cmdline_num = val;
        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
               sizeof(s->map_pid_to_cmdline));
        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
               val * sizeof(*s->map_cmdline_to_pid));

static int trace_create_savedcmd(void)
        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);

        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
int is_tracing_stopped(void)
        return global_trace.stop_count;

 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
void tracing_start(void)
        struct trace_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)

        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (--global_trace.stop_count) {
                if (global_trace.stop_count < 0) {
                        /* Someone screwed up their debugging */
                        global_trace.stop_count = 0;

        /* Prevent the buffers from switching */
        arch_spin_lock(&global_trace.max_lock);

        buffer = global_trace.array_buffer.buffer;
                ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        buffer = global_trace.max_buffer.buffer;
                ring_buffer_record_enable(buffer);

        arch_spin_unlock(&global_trace.max_lock);

        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);

static void tracing_start_tr(struct trace_array *tr)
        struct trace_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)

        /* If global, we need to also start the max tracer */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
                return tracing_start();

        raw_spin_lock_irqsave(&tr->start_lock, flags);

        if (--tr->stop_count) {
                if (tr->stop_count < 0) {
                        /* Someone screwed up their debugging */

        buffer = tr->array_buffer.buffer;
                ring_buffer_record_enable(buffer);

        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2343 * tracing_stop - quick stop of the tracer
2345 * Light weight way to stop tracing. Use in conjunction with
2348 void tracing_stop(void)
2350 struct trace_buffer *buffer;
2351 unsigned long flags;
2353 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2354 if (global_trace.stop_count++)
2357 /* Prevent the buffers from switching */
2358 arch_spin_lock(&global_trace.max_lock);
2360 buffer = global_trace.array_buffer.buffer;
2362 ring_buffer_record_disable(buffer);
2364 #ifdef CONFIG_TRACER_MAX_TRACE
2365 buffer = global_trace.max_buffer.buffer;
2367 ring_buffer_record_disable(buffer);
2370 arch_spin_unlock(&global_trace.max_lock);
2373 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2376 static void tracing_stop_tr(struct trace_array *tr)
2378 struct trace_buffer *buffer;
2379 unsigned long flags;
2381 /* If global, we need to also stop the max tracer */
2382 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2383 return tracing_stop();
2385 raw_spin_lock_irqsave(&tr->start_lock, flags);
2386 if (tr->stop_count++)
2389 buffer = tr->array_buffer.buffer;
2391 ring_buffer_record_disable(buffer);
2394 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
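/*
 * Save tsk->comm in the saved_cmdlines cache, indexed by the PID masked to
 * PID_MAX_DEFAULT. Uses a trylock, so the update may be silently skipped
 * under contention.
 */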
2397 static int trace_save_cmdline(struct task_struct *tsk)
2401 /* treat recording of idle task as a success */
2405 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2408 * It's not the end of the world if we don't get
2409 * the lock, but we also don't want to spin
2410 * nor do we want to disable interrupts,
2411 * so if we miss here, then better luck next time.
2413 if (!arch_spin_trylock(&trace_cmdline_lock))
2416 idx = savedcmd->map_pid_to_cmdline[tpid];
2417 if (idx == NO_CMDLINE_MAP) {
2418 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2420 savedcmd->map_pid_to_cmdline[tpid] = idx;
2421 savedcmd->cmdline_idx = idx;
2424 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2425 set_cmdline(idx, tsk->comm);
2427 arch_spin_unlock(&trace_cmdline_lock);
2432 static void __trace_find_cmdline(int pid, char comm[])
2438 strcpy(comm, "<idle>");
2442 if (WARN_ON_ONCE(pid < 0)) {
2443 strcpy(comm, "<XXX>");
2447 tpid = pid & (PID_MAX_DEFAULT - 1);
2448 map = savedcmd->map_pid_to_cmdline[tpid];
2449 if (map != NO_CMDLINE_MAP) {
2450 tpid = savedcmd->map_cmdline_to_pid[map];
2452 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2456 strcpy(comm, "<...>");
2459 void trace_find_cmdline(int pid, char comm[])
2462 arch_spin_lock(&trace_cmdline_lock);
2464 __trace_find_cmdline(pid, comm);
2466 arch_spin_unlock(&trace_cmdline_lock);
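/*
 * Return a pointer to the tgid_map slot for @pid, or NULL if the map is
 * not allocated or @pid is out of range.
 */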
2470 static int *trace_find_tgid_ptr(int pid)
2473 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2474 * if we observe a non-NULL tgid_map then we also observe the correct
2477 int *map = smp_load_acquire(&tgid_map);
2479 if (unlikely(!map || pid > tgid_map_max))
2485 int trace_find_tgid(int pid)
2487 int *ptr = trace_find_tgid_ptr(pid);
2489 return ptr ? *ptr : 0;
2492 static int trace_save_tgid(struct task_struct *tsk)
2496 /* treat recording of idle task as a success */
2500 ptr = trace_find_tgid_ptr(tsk->pid);
2508 static bool tracing_record_taskinfo_skip(int flags)
2510 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2512 if (!__this_cpu_read(trace_taskinfo_save))
2518 * tracing_record_taskinfo - record the task info of a task
2520 * @task: task to record
2521 * @flags: TRACE_RECORD_CMDLINE for recording comm
2522 * TRACE_RECORD_TGID for recording tgid
2524 void tracing_record_taskinfo(struct task_struct *task, int flags)
2528 if (tracing_record_taskinfo_skip(flags))
2532 * Record as much task information as possible. If some fail, continue
2533 * to try to record the others.
2535 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2536 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2538 /* If recording any information failed, retry again soon. */
2542 __this_cpu_write(trace_taskinfo_save, false);
2546 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2548 * @prev: previous task during sched_switch
2549 * @next: next task during sched_switch
2550 * @flags: TRACE_RECORD_CMDLINE for recording comm
2551 * TRACE_RECORD_TGID for recording tgid
2553 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2554 struct task_struct *next, int flags)
2558 if (tracing_record_taskinfo_skip(flags))
2562 * Record as much task information as possible. If some fail, continue
2563 * to try to record the others.
2565 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2566 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2567 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2568 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2570 /* If recording any information failed, retry again soon. */
2574 __this_cpu_write(trace_taskinfo_save, false);
2577 /* Helpers to record specific task information */
2578 void tracing_record_cmdline(struct task_struct *task)
2580 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2583 void tracing_record_tgid(struct task_struct *task)
2585 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2589 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2590 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2591 * simplifies those functions and keeps them in sync.
2593 enum print_line_t trace_handle_return(struct trace_seq *s)
2595 return trace_seq_has_overflowed(s) ?
2596 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2598 EXPORT_SYMBOL_GPL(trace_handle_return);
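/* Current task's migration-disable depth (zero on non-SMP builds). */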
2600 static unsigned short migration_disable_value(void)
2602 #if defined(CONFIG_SMP)
2603 return current->migration_disabled;
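/*
 * Pack the context for a trace event: irq/softirq/NMI and resched flags in
 * the upper 16 bits, the preempt count in the low nibble, and the
 * migration-disable depth in bits 4-7.
 */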
2609 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2611 unsigned int trace_flags = irqs_status;
2614 pc = preempt_count();
2617 trace_flags |= TRACE_FLAG_NMI;
2618 if (pc & HARDIRQ_MASK)
2619 trace_flags |= TRACE_FLAG_HARDIRQ;
2620 if (in_serving_softirq())
2621 trace_flags |= TRACE_FLAG_SOFTIRQ;
2622 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2623 trace_flags |= TRACE_FLAG_BH_OFF;
2625 if (tif_need_resched())
2626 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2627 if (test_preempt_need_resched())
2628 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2629 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2630 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2633 struct ring_buffer_event *
2634 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2637 unsigned int trace_ctx)
2639 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2642 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2643 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2644 static int trace_buffered_event_ref;
2647 * trace_buffered_event_enable - enable buffering events
2649 * When events are being filtered, it is quicker to use a temporary
2650 * buffer to write the event data into if there's a likely chance
2651 * that it will not be committed. The discard of the ring buffer
2652 * is not as fast as committing, and is much slower than copying the memory.
2655 * When an event is to be filtered, allocate per cpu buffers to
2656 * write the event data into, and if the event is filtered and discarded
2657 * it is simply dropped; otherwise, the entire data is committed to the buffer.
2660 void trace_buffered_event_enable(void)
2662 struct ring_buffer_event *event;
2666 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2668 if (trace_buffered_event_ref++)
2671 for_each_tracing_cpu(cpu) {
2672 page = alloc_pages_node(cpu_to_node(cpu),
2673 GFP_KERNEL | __GFP_NORETRY, 0);
2677 event = page_address(page);
2678 memset(event, 0, sizeof(*event));
2680 per_cpu(trace_buffered_event, cpu) = event;
2683 if (cpu == smp_processor_id() &&
2684 __this_cpu_read(trace_buffered_event) !=
2685 per_cpu(trace_buffered_event, cpu))
2692 trace_buffered_event_disable();
2695 static void enable_trace_buffered_event(void *data)
2697 /* Probably not needed, but do it anyway */
2699 this_cpu_dec(trace_buffered_event_cnt);
2702 static void disable_trace_buffered_event(void *data)
2704 this_cpu_inc(trace_buffered_event_cnt);
2708 * trace_buffered_event_disable - disable buffering events
2710 * When a filter is removed, it is faster to not use the buffered
2711 * events, and to commit directly into the ring buffer. Free up
2712 * the temp buffers when there are no more users. This requires
2713 * special synchronization with current events.
2715 void trace_buffered_event_disable(void)
2719 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2721 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2724 if (--trace_buffered_event_ref)
2728 /* For each CPU, set the buffer as used. */
2729 smp_call_function_many(tracing_buffer_mask,
2730 disable_trace_buffered_event, NULL, 1);
2733 /* Wait for all current users to finish */
2736 for_each_tracing_cpu(cpu) {
2737 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2738 per_cpu(trace_buffered_event, cpu) = NULL;
2741 * Make sure trace_buffered_event is NULL before clearing
2742 * trace_buffered_event_cnt.
2747 /* Do the work on each cpu */
2748 smp_call_function_many(tracing_buffer_mask,
2749 enable_trace_buffered_event, NULL, 1);
2753 static struct trace_buffer *temp_buffer;
2755 struct ring_buffer_event *
2756 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2757 struct trace_event_file *trace_file,
2758 int type, unsigned long len,
2759 unsigned int trace_ctx)
2761 struct ring_buffer_event *entry;
2762 struct trace_array *tr = trace_file->tr;
2765 *current_rb = tr->array_buffer.buffer;
2767 if (!tr->no_filter_buffering_ref &&
2768 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2769 preempt_disable_notrace();
2771 * Filtering is on, so try to use the per cpu buffer first.
2772 * This buffer will simulate a ring_buffer_event,
2773 * where the type_len is zero and the array[0] will
2774 * hold the full length.
2775 * (see include/linux/ring_buffer.h for details on
2776 * how the ring_buffer_event is structured).
2778 * Using a temp buffer during filtering and copying it
2779 * on a matched filter is quicker than writing directly
2780 * into the ring buffer and then discarding it when
2781 * it doesn't match. That is because the discard
2782 * requires several atomic operations to get right.
2783 * Copying on a match and doing nothing on a failed match
2784 * is still quicker than not copying on a match but having
2785 * to discard out of the ring buffer on a failed match.
2787 if ((entry = __this_cpu_read(trace_buffered_event))) {
2788 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2790 val = this_cpu_inc_return(trace_buffered_event_cnt);
2793 * Preemption is disabled, but interrupts and NMIs
2794 * can still come in now. If that happens after
2795 * the above increment, then it will have to go
2796 * back to the old method of allocating the event
2797 * on the ring buffer, and if the filter fails, it
2798 * will have to call ring_buffer_discard_commit() to remove it from the ring buffer.
2801 * Need to also check the unlikely case that the
2802 * length is bigger than the temp buffer size.
2803 * If that happens, then the reserve is pretty much
2804 * guaranteed to fail, as the ring buffer currently
2805 * only allows events less than a page. But that may
2806 * change in the future, so let the ring buffer reserve
2807 * handle the failure in that case.
2809 if (val == 1 && likely(len <= max_len)) {
2810 trace_event_setup(entry, type, trace_ctx);
2811 entry->array[0] = len;
2812 /* Return with preemption disabled */
2815 this_cpu_dec(trace_buffered_event_cnt);
2817 /* __trace_buffer_lock_reserve() disables preemption */
2818 preempt_enable_notrace();
2821 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2824 * If tracing is off, but we have triggers enabled
2825 * we still need to look at the event data. Use the temp_buffer
2826 * to store the trace event for the trigger to use. It's recursion
2827 * safe and will not be recorded anywhere.
2829 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2830 *current_rb = temp_buffer;
2831 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2836 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2838 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2839 static DEFINE_MUTEX(tracepoint_printk_mutex);
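/*
 * Print a trace event directly to printk() through its registered trace()
 * callback, respecting soft-disable and filter state. Only used when the
 * tracepoint_printk static key is enabled.
 */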
2841 static void output_printk(struct trace_event_buffer *fbuffer)
2843 struct trace_event_call *event_call;
2844 struct trace_event_file *file;
2845 struct trace_event *event;
2846 unsigned long flags;
2847 struct trace_iterator *iter = tracepoint_print_iter;
2849 /* We should never get here if iter is NULL */
2850 if (WARN_ON_ONCE(!iter))
2853 event_call = fbuffer->trace_file->event_call;
2854 if (!event_call || !event_call->event.funcs ||
2855 !event_call->event.funcs->trace)
2858 file = fbuffer->trace_file;
2859 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2860 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2861 !filter_match_preds(file->filter, fbuffer->entry)))
2864 event = &fbuffer->trace_file->event_call->event;
2866 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2867 trace_seq_init(&iter->seq);
2868 iter->ent = fbuffer->entry;
2869 event_call->event.funcs->trace(iter, 0, event);
2870 trace_seq_putc(&iter->seq, 0);
2871 printk("%s", iter->seq.buffer);
2873 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
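/*
 * sysctl handler for tracepoint_printk: flips the tracepoint_printk static
 * key when the value actually changes.
 */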
2876 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2877 void *buffer, size_t *lenp,
2880 int save_tracepoint_printk;
2883 mutex_lock(&tracepoint_printk_mutex);
2884 save_tracepoint_printk = tracepoint_printk;
2886 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2889 * This will force exiting early, as tracepoint_printk
2890 * is always zero when tracepoint_print_iter is not allocated.
2892 if (!tracepoint_print_iter)
2893 tracepoint_printk = 0;
2895 if (save_tracepoint_printk == tracepoint_printk)
2898 if (tracepoint_printk)
2899 static_key_enable(&tracepoint_printk_key.key);
2901 static_key_disable(&tracepoint_printk_key.key);
2904 mutex_unlock(&tracepoint_printk_mutex);
2909 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2911 enum event_trigger_type tt = ETT_NONE;
2912 struct trace_event_file *file = fbuffer->trace_file;
2914 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2915 fbuffer->entry, &tt))
2918 if (static_key_false(&tracepoint_printk_key.key))
2919 output_printk(fbuffer);
2921 if (static_branch_unlikely(&trace_event_exports_enabled))
2922 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2924 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2925 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2929 event_triggers_post_call(file, tt);
2932 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2937 * trace_buffer_unlock_commit_regs()
2938 * trace_event_buffer_commit()
2939 * trace_event_raw_event_xxx()
2941 # define STACK_SKIP 3
2943 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2944 struct trace_buffer *buffer,
2945 struct ring_buffer_event *event,
2946 unsigned int trace_ctx,
2947 struct pt_regs *regs)
2949 __buffer_unlock_commit(buffer, event);
2952 * If regs is not set, then skip the necessary functions.
2953 * Note, we can still get here via blktrace, wakeup tracer
2954 * and mmiotrace, but that's ok if they lose a function or
2955 * two. They are not that meaningful.
2957 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2958 ftrace_trace_userstack(tr, buffer, trace_ctx);
2962 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2965 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2966 struct ring_buffer_event *event)
2968 __buffer_unlock_commit(buffer, event);
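/* Record a TRACE_FN entry (ip and parent_ip) into the instance's ring buffer. */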
2972 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2973 parent_ip, unsigned int trace_ctx)
2975 struct trace_event_call *call = &event_function;
2976 struct trace_buffer *buffer = tr->array_buffer.buffer;
2977 struct ring_buffer_event *event;
2978 struct ftrace_entry *entry;
2980 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2984 entry = ring_buffer_event_data(event);
2986 entry->parent_ip = parent_ip;
2988 if (!call_filter_check_discard(call, entry, buffer, event)) {
2989 if (static_branch_unlikely(&trace_function_exports_enabled))
2990 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2991 __buffer_unlock_commit(buffer, event);
2995 #ifdef CONFIG_STACKTRACE
2997 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2998 #define FTRACE_KSTACK_NESTING 4
3000 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3002 struct ftrace_stack {
3003 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3007 struct ftrace_stacks {
3008 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3011 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3012 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
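/*
 * Capture a kernel stack trace into one of the per-CPU ftrace_stack nesting
 * slots and commit it to the ring buffer as a TRACE_STACK event.
 */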
3014 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3015 unsigned int trace_ctx,
3016 int skip, struct pt_regs *regs)
3018 struct trace_event_call *call = &event_kernel_stack;
3019 struct ring_buffer_event *event;
3020 unsigned int size, nr_entries;
3021 struct ftrace_stack *fstack;
3022 struct stack_entry *entry;
3026 * Add one, for this function and the call to save_stack_trace()
3027 * If regs is set, then these functions will not be in the way.
3029 #ifndef CONFIG_UNWINDER_ORC
3034 preempt_disable_notrace();
3036 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3038 /* This should never happen. If it does, yell once and skip */
3039 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3043 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3044 * interrupt will either see the value pre increment or post
3045 * increment. If the interrupt happens pre increment it will have
3046 * restored the counter when it returns. We just need a barrier to
3047 * keep gcc from moving things around.
3051 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3052 size = ARRAY_SIZE(fstack->calls);
3055 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3058 nr_entries = stack_trace_save(fstack->calls, size, skip);
3061 size = nr_entries * sizeof(unsigned long);
3062 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3063 (sizeof(*entry) - sizeof(entry->caller)) + size,
3067 entry = ring_buffer_event_data(event);
3069 memcpy(&entry->caller, fstack->calls, size);
3070 entry->size = nr_entries;
3072 if (!call_filter_check_discard(call, entry, buffer, event))
3073 __buffer_unlock_commit(buffer, event);
3076 /* Again, don't let gcc optimize things here */
3078 __this_cpu_dec(ftrace_stack_reserve);
3079 preempt_enable_notrace();
3083 static inline void ftrace_trace_stack(struct trace_array *tr,
3084 struct trace_buffer *buffer,
3085 unsigned int trace_ctx,
3086 int skip, struct pt_regs *regs)
3088 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3091 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3094 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3097 struct trace_buffer *buffer = tr->array_buffer.buffer;
3099 if (rcu_is_watching()) {
3100 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3105 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3106 * but if the above rcu_is_watching() failed, then the NMI
3107 * triggered someplace critical, and rcu_irq_enter() should
3108 * not be called from NMI.
3110 if (unlikely(in_nmi()))
3113 rcu_irq_enter_irqson();
3114 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3115 rcu_irq_exit_irqson();
3119 * trace_dump_stack - record a stack back trace in the trace buffer
3120 * @skip: Number of functions to skip (helper handlers)
3122 void trace_dump_stack(int skip)
3124 if (tracing_disabled || tracing_selftest_running)
3127 #ifndef CONFIG_UNWINDER_ORC
3128 /* Skip 1 to skip this function. */
3131 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3132 tracing_gen_ctx(), skip, NULL);
3134 EXPORT_SYMBOL_GPL(trace_dump_stack);
3136 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3137 static DEFINE_PER_CPU(int, user_stack_count);
3140 ftrace_trace_userstack(struct trace_array *tr,
3141 struct trace_buffer *buffer, unsigned int trace_ctx)
3143 struct trace_event_call *call = &event_user_stack;
3144 struct ring_buffer_event *event;
3145 struct userstack_entry *entry;
3147 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3151 * NMIs can not handle page faults, even with fix ups.
3152 * Saving the user stack can (and often does) fault.
3154 if (unlikely(in_nmi()))
3158 * prevent recursion, since the user stack tracing may
3159 * trigger other kernel events.
3162 if (__this_cpu_read(user_stack_count))
3165 __this_cpu_inc(user_stack_count);
3167 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3168 sizeof(*entry), trace_ctx);
3170 goto out_drop_count;
3171 entry = ring_buffer_event_data(event);
3173 entry->tgid = current->tgid;
3174 memset(&entry->caller, 0, sizeof(entry->caller));
3176 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3177 if (!call_filter_check_discard(call, entry, buffer, event))
3178 __buffer_unlock_commit(buffer, event);
3181 __this_cpu_dec(user_stack_count);
3185 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3186 static void ftrace_trace_userstack(struct trace_array *tr,
3187 struct trace_buffer *buffer,
3188 unsigned int trace_ctx)
3191 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3193 #endif /* CONFIG_STACKTRACE */
3196 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3197 unsigned long long delta)
3199 entry->bottom_delta_ts = delta & U32_MAX;
3200 entry->top_delta_ts = (delta >> 32);
3203 void trace_last_func_repeats(struct trace_array *tr,
3204 struct trace_func_repeats *last_info,
3205 unsigned int trace_ctx)
3207 struct trace_buffer *buffer = tr->array_buffer.buffer;
3208 struct func_repeats_entry *entry;
3209 struct ring_buffer_event *event;
3212 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3213 sizeof(*entry), trace_ctx);
3217 delta = ring_buffer_event_time_stamp(buffer, event) -
3218 last_info->ts_last_call;
3220 entry = ring_buffer_event_data(event);
3221 entry->ip = last_info->ip;
3222 entry->parent_ip = last_info->parent_ip;
3223 entry->count = last_info->count;
3224 func_repeats_set_delta_ts(entry, delta);
3226 __buffer_unlock_commit(buffer, event);
3229 /* created for use with alloc_percpu */
3230 struct trace_buffer_struct {
3232 char buffer[4][TRACE_BUF_SIZE];
3235 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3238 * This allows for lockless recording. If we're nested too deeply, then
3239 * this returns NULL.
3241 static char *get_trace_buf(void)
3243 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3245 if (!trace_percpu_buffer || buffer->nesting >= 4)
3250 /* Interrupts must see nesting incremented before we use the buffer */
3252 return &buffer->buffer[buffer->nesting - 1][0];
3255 static void put_trace_buf(void)
3257 /* Don't let the decrement of nesting leak before this */
3259 this_cpu_dec(trace_percpu_buffer->nesting);
3262 static int alloc_percpu_trace_buffer(void)
3264 struct trace_buffer_struct __percpu *buffers;
3266 if (trace_percpu_buffer)
3269 buffers = alloc_percpu(struct trace_buffer_struct);
3270 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3273 trace_percpu_buffer = buffers;
3277 static int buffers_allocated;
3279 void trace_printk_init_buffers(void)
3281 if (buffers_allocated)
3284 if (alloc_percpu_trace_buffer())
3287 /* trace_printk() is for debug use only. Don't use it in production. */
3290 pr_warn("**********************************************************\n");
3291 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3293 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3295 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3296 pr_warn("** unsafe for production use. **\n");
3298 pr_warn("** If you see this message and you are not debugging **\n");
3299 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3301 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3302 pr_warn("**********************************************************\n");
3304 /* Expand the buffers to set size */
3305 tracing_update_buffers();
3307 buffers_allocated = 1;
3310 * trace_printk_init_buffers() can be called by modules.
3311 * If that happens, then we need to start cmdline recording
3312 * directly here. If the global_trace.buffer is already
3313 * allocated here, then this was called by module code.
3315 if (global_trace.array_buffer.buffer)
3316 tracing_start_cmdline_record();
3318 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3320 void trace_printk_start_comm(void)
3322 /* Start tracing comms if trace printk is set */
3323 if (!buffers_allocated)
3325 tracing_start_cmdline_record();
3328 static void trace_printk_start_stop_comm(int enabled)
3330 if (!buffers_allocated)
3334 tracing_start_cmdline_record();
3336 tracing_stop_cmdline_record();
3340 * trace_vbprintk - write binary msg to tracing buffer
3341 * @ip: The address of the caller
3342 * @fmt: The string format to write to the buffer
3343 * @args: Arguments for @fmt
3345 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3347 struct trace_event_call *call = &event_bprint;
3348 struct ring_buffer_event *event;
3349 struct trace_buffer *buffer;
3350 struct trace_array *tr = &global_trace;
3351 struct bprint_entry *entry;
3352 unsigned int trace_ctx;
3356 if (unlikely(tracing_selftest_running || tracing_disabled))
3359 /* Don't pollute graph traces with trace_vprintk internals */
3360 pause_graph_tracing();
3362 trace_ctx = tracing_gen_ctx();
3363 preempt_disable_notrace();
3365 tbuffer = get_trace_buf();
3371 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3373 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3376 size = sizeof(*entry) + sizeof(u32) * len;
3377 buffer = tr->array_buffer.buffer;
3378 ring_buffer_nest_start(buffer);
3379 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3383 entry = ring_buffer_event_data(event);
3387 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3388 if (!call_filter_check_discard(call, entry, buffer, event)) {
3389 __buffer_unlock_commit(buffer, event);
3390 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3394 ring_buffer_nest_end(buffer);
3399 preempt_enable_notrace();
3400 unpause_graph_tracing();
3404 EXPORT_SYMBOL_GPL(trace_vbprintk);
3408 __trace_array_vprintk(struct trace_buffer *buffer,
3409 unsigned long ip, const char *fmt, va_list args)
3411 struct trace_event_call *call = &event_print;
3412 struct ring_buffer_event *event;
3414 struct print_entry *entry;
3415 unsigned int trace_ctx;
3418 if (tracing_disabled || tracing_selftest_running)
3421 /* Don't pollute graph traces with trace_vprintk internals */
3422 pause_graph_tracing();
3424 trace_ctx = tracing_gen_ctx();
3425 preempt_disable_notrace();
3428 tbuffer = get_trace_buf();
3434 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3436 size = sizeof(*entry) + len + 1;
3437 ring_buffer_nest_start(buffer);
3438 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3442 entry = ring_buffer_event_data(event);
3445 memcpy(&entry->buf, tbuffer, len + 1);
3446 if (!call_filter_check_discard(call, entry, buffer, event)) {
3447 __buffer_unlock_commit(buffer, event);
3448 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3452 ring_buffer_nest_end(buffer);
3456 preempt_enable_notrace();
3457 unpause_graph_tracing();
3463 int trace_array_vprintk(struct trace_array *tr,
3464 unsigned long ip, const char *fmt, va_list args)
3466 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3470 * trace_array_printk - Print a message to a specific instance
3471 * @tr: The instance trace_array descriptor
3472 * @ip: The instruction pointer that this is called from.
3473 * @fmt: The format to print (printf format)
3475 * If a subsystem sets up its own instance, it has the right to
3476 * printk strings into its tracing instance buffer using this
3477 * function. Note, this function will not write into the top level
3478 * buffer (use trace_printk() for that), as writing into the top level
3479 * buffer should only have events that can be individually disabled.
3480 * trace_printk() is only used for debugging a kernel, and should never
3481 * be incorporated into normal use.
3483 * trace_array_printk() can be used, as it will not add noise to the
3484 * top level tracing buffer.
3486 * Note, trace_array_init_printk() must be called on @tr before this
3490 int trace_array_printk(struct trace_array *tr,
3491 unsigned long ip, const char *fmt, ...)
3499 /* This is only allowed for created instances */
3500 if (tr == &global_trace)
3503 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3507 ret = trace_array_vprintk(tr, ip, fmt, ap);
3511 EXPORT_SYMBOL_GPL(trace_array_printk);
3514 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3515 * @tr: The trace array to initialize the buffers for
3517 * As trace_array_printk() only writes into instances, they are OK to
3518 * have in the kernel (unlike trace_printk()). This needs to be called
3519 * before trace_array_printk() can be used on a trace_array.
3521 int trace_array_init_printk(struct trace_array *tr)
3526 /* This is only allowed for created instances */
3527 if (tr == &global_trace)
3530 return alloc_percpu_trace_buffer();
3532 EXPORT_SYMBOL_GPL(trace_array_init_printk);
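/*
 * Illustrative sketch, not part of this file: a subsystem that owns its own
 * instance would typically pair the two exported calls above roughly as
 *
 *	tr = trace_array_get_by_name("my_subsys");
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 *
 * "my_subsys" and "state" are made-up names used only for the example.
 */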
3535 int trace_array_printk_buf(struct trace_buffer *buffer,
3536 unsigned long ip, const char *fmt, ...)
3541 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3545 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3551 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3553 return trace_array_vprintk(&global_trace, ip, fmt, args);
3555 EXPORT_SYMBOL_GPL(trace_vprintk);
3557 static void trace_iterator_increment(struct trace_iterator *iter)
3559 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3563 ring_buffer_iter_advance(buf_iter);
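/*
 * Peek at the next event for @cpu without consuming it, filling in its
 * timestamp and any lost-event count.
 */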
3566 static struct trace_entry *
3567 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3568 unsigned long *lost_events)
3570 struct ring_buffer_event *event;
3571 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3574 event = ring_buffer_iter_peek(buf_iter, ts);
3576 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3577 (unsigned long)-1 : 0;
3579 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3584 iter->ent_size = ring_buffer_event_length(event);
3585 return ring_buffer_event_data(event);
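/*
 * Walk the per-CPU buffers (or just iter->cpu_file) and return the pending
 * entry with the earliest timestamp, along with its CPU and any lost events.
 */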
3591 static struct trace_entry *
3592 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3593 unsigned long *missing_events, u64 *ent_ts)
3595 struct trace_buffer *buffer = iter->array_buffer->buffer;
3596 struct trace_entry *ent, *next = NULL;
3597 unsigned long lost_events = 0, next_lost = 0;
3598 int cpu_file = iter->cpu_file;
3599 u64 next_ts = 0, ts;
3605 * If we are in a per_cpu trace file, don't bother by iterating over
3606 * all cpu and peek directly.
3608 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3609 if (ring_buffer_empty_cpu(buffer, cpu_file))
3611 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3613 *ent_cpu = cpu_file;
3618 for_each_tracing_cpu(cpu) {
3620 if (ring_buffer_empty_cpu(buffer, cpu))
3623 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3626 * Pick the entry with the smallest timestamp:
3628 if (ent && (!next || ts < next_ts)) {
3632 next_lost = lost_events;
3633 next_size = iter->ent_size;
3637 iter->ent_size = next_size;
3640 *ent_cpu = next_cpu;
3646 *missing_events = next_lost;
3651 #define STATIC_FMT_BUF_SIZE 128
3652 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3654 static char *trace_iter_expand_format(struct trace_iterator *iter)
3659 * iter->tr is NULL when used with tp_printk, which makes
3660 * this get called where it is not safe to call krealloc().
3662 if (!iter->tr || iter->fmt == static_fmt_buf)
3665 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3668 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3675 /* Returns true if the string is safe to dereference from an event */
3676 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3679 unsigned long addr = (unsigned long)str;
3680 struct trace_event *trace_event;
3681 struct trace_event_call *event;
3683 /* Ignore strings with no length */
3687 /* OK if part of the event data */
3688 if ((addr >= (unsigned long)iter->ent) &&
3689 (addr < (unsigned long)iter->ent + iter->ent_size))
3692 /* OK if part of the temp seq buffer */
3693 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3694 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3697 /* Core rodata can not be freed */
3698 if (is_kernel_rodata(addr))
3701 if (trace_is_tracepoint_string(str))
3705 * Now this could be a module event, referencing core module
3706 * data, which is OK.
3711 trace_event = ftrace_find_event(iter->ent->type);
3715 event = container_of(trace_event, struct trace_event_call, event);
3716 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3719 /* Would rather have rodata, but this will suffice */
3720 if (within_module_core(addr, event->module))
3726 static const char *show_buffer(struct trace_seq *s)
3728 struct seq_buf *seq = &s->seq;
3730 seq_buf_terminate(seq);
3735 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3737 static int test_can_verify_check(const char *fmt, ...)
3744 * The verifier depends on vsnprintf() modifying the va_list
3745 * passed to it, where it is sent as a reference. Some architectures
3746 * (like x86_32) pass it by value, which means that vsnprintf()
3747 * does not modify the va_list passed to it, and the verifier
3748 * would then need to be able to understand all the values that
3749 * vsnprintf can use. If it is passed by value, then the verifier is disabled.
3753 vsnprintf(buf, 16, "%d", ap);
3754 ret = va_arg(ap, int);
3760 static void test_can_verify(void)
3762 if (!test_can_verify_check("%d %d", 0, 1)) {
3763 pr_info("trace event string verifier disabled\n");
3764 static_branch_inc(&trace_no_verify);
3769 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3770 * @iter: The iterator that holds the seq buffer and the event being printed
3771 * @fmt: The format used to print the event
3772 * @ap: The va_list holding the data to print from @fmt.
3774 * This writes the data into the @iter->seq buffer using the data from
3775 * @fmt and @ap. If the format has a %s, then the source of the string
3776 * is examined to make sure it is safe to print, otherwise it will
3777 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3780 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3783 const char *p = fmt;
3787 if (WARN_ON_ONCE(!fmt))
3790 if (static_branch_unlikely(&trace_no_verify))
3793 /* Don't bother checking when doing a ftrace_dump() */
3794 if (iter->fmt == static_fmt_buf)
3803 /* We only care about %s and variants */
3804 for (i = 0; p[i]; i++) {
3805 if (i + 1 >= iter->fmt_size) {
3807 * If we can't expand the copy buffer,
3810 if (!trace_iter_expand_format(iter))
3814 if (p[i] == '\\' && p[i+1]) {
3819 /* Need to test cases like %08.*s */
3820 for (j = 1; p[i+j]; j++) {
3821 if (isdigit(p[i+j]) ||
3824 if (p[i+j] == '*') {
3836 /* If no %s found then just print normally */
3840 /* Copy up to the %s, and print that */
3841 strncpy(iter->fmt, p, i);
3842 iter->fmt[i] = '\0';
3843 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3846 * If iter->seq is full, the above call no longer guarantees
3847 * that ap is in sync with fmt processing, and further calls
3848 * to va_arg() can return wrong positional arguments.
3850 * Ensure that ap is no longer used in this case.
3852 if (iter->seq.full) {
3858 len = va_arg(ap, int);
3860 /* The ap now points to the string data of the %s */
3861 str = va_arg(ap, const char *);
3864 * If you hit this warning, it is likely that the
3865 * trace event in question used %s on a string that
3866 * was saved at the time of the event, but may not be
3867 * around when the trace is read. Use __string(),
3868 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3869 * instead. See samples/trace_events/trace-events-sample.h
3872 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3873 "fmt: '%s' current_buffer: '%s'",
3874 fmt, show_buffer(&iter->seq))) {
3877 /* Try to safely read the string */
3879 if (len + 1 > iter->fmt_size)
3880 len = iter->fmt_size - 1;
3883 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3887 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3891 trace_seq_printf(&iter->seq, "(0x%px)", str);
3893 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3895 str = "[UNSAFE-MEMORY]";
3896 strcpy(iter->fmt, "%s");
3898 strncpy(iter->fmt, p + i, j + 1);
3899 iter->fmt[j+1] = '\0';
3902 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3904 trace_seq_printf(&iter->seq, iter->fmt, str);
3910 trace_seq_vprintf(&iter->seq, p, ap);
3913 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3915 const char *p, *new_fmt;
3918 if (WARN_ON_ONCE(!fmt))
3921 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3925 new_fmt = q = iter->fmt;
3927 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3928 if (!trace_iter_expand_format(iter))
3931 q += iter->fmt - new_fmt;
3932 new_fmt = iter->fmt;
3937 /* Replace %p with %px */
3941 } else if (p[0] == 'p' && !isalnum(p[1])) {
3952 #define STATIC_TEMP_BUF_SIZE 128
3953 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3955 /* Find the next real entry, without updating the iterator itself */
3956 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3957 int *ent_cpu, u64 *ent_ts)
3959 /* __find_next_entry will reset ent_size */
3960 int ent_size = iter->ent_size;
3961 struct trace_entry *entry;
3964 * If called from ftrace_dump(), then the iter->temp buffer
3965 * will be the static_temp_buf and not created from kmalloc.
3966 * If the entry size is greater than the buffer, we can
3967 * not save it. Just return NULL in that case. This is only
3968 * used to add markers when two consecutive events' time
3969 * stamps have a large delta. See trace_print_lat_context()
3971 if (iter->temp == static_temp_buf &&
3972 STATIC_TEMP_BUF_SIZE < ent_size)
3976 * The __find_next_entry() may call peek_next_entry(), which may
3977 * call ring_buffer_peek() that may make the contents of iter->ent
3978 * undefined. Need to copy iter->ent now.
3980 if (iter->ent && iter->ent != iter->temp) {
3981 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3982 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3984 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3989 iter->temp_size = iter->ent_size;
3991 memcpy(iter->temp, iter->ent, iter->ent_size);
3992 iter->ent = iter->temp;
3994 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3995 /* Put back the original ent_size */
3996 iter->ent_size = ent_size;
4001 /* Find the next real entry, and increment the iterator to the next entry */
4002 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4004 iter->ent = __find_next_entry(iter, &iter->cpu,
4005 &iter->lost_events, &iter->ts);
4008 trace_iterator_increment(iter);
4010 return iter->ent ? iter : NULL;
4013 static void trace_consume(struct trace_iterator *iter)
4015 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4016 &iter->lost_events);
4019 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4021 struct trace_iterator *iter = m->private;
4025 WARN_ON_ONCE(iter->leftover);
4029 /* can't go backwards */
4034 ent = trace_find_next_entry_inc(iter);
4038 while (ent && iter->idx < i)
4039 ent = trace_find_next_entry_inc(iter);
4046 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4048 struct ring_buffer_iter *buf_iter;
4049 unsigned long entries = 0;
4052 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4054 buf_iter = trace_buffer_iter(iter, cpu);
4058 ring_buffer_iter_reset(buf_iter);
4061 * We could have the case with the max latency tracers
4062 * that a reset never took place on a cpu. This is evident
4063 * by the timestamp being before the start of the buffer.
4065 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4066 if (ts >= iter->array_buffer->time_start)
4069 ring_buffer_iter_advance(buf_iter);
4072 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4076 * The current tracer is copied to avoid global locking all around.
4079 static void *s_start(struct seq_file *m, loff_t *pos)
4081 struct trace_iterator *iter = m->private;
4082 struct trace_array *tr = iter->tr;
4083 int cpu_file = iter->cpu_file;
4089 * copy the tracer to avoid using a global lock all around.
4090 * iter->trace is a copy of current_trace, the pointer to the
4091 * name may be used instead of a strcmp(), as iter->trace->name
4092 * will point to the same string as current_trace->name.
4094 mutex_lock(&trace_types_lock);
4095 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4096 *iter->trace = *tr->current_trace;
4097 mutex_unlock(&trace_types_lock);
4099 #ifdef CONFIG_TRACER_MAX_TRACE
4100 if (iter->snapshot && iter->trace->use_max_tr)
4101 return ERR_PTR(-EBUSY);
4104 if (*pos != iter->pos) {
4109 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4110 for_each_tracing_cpu(cpu)
4111 tracing_iter_reset(iter, cpu);
4113 tracing_iter_reset(iter, cpu_file);
4116 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4121 * If we overflowed the seq_file before, then we want
4122 * to just reuse the trace_seq buffer again.
4128 p = s_next(m, p, &l);
4132 trace_event_read_lock();
4133 trace_access_lock(cpu_file);
4137 static void s_stop(struct seq_file *m, void *p)
4139 struct trace_iterator *iter = m->private;
4141 #ifdef CONFIG_TRACER_MAX_TRACE
4142 if (iter->snapshot && iter->trace->use_max_tr)
4146 trace_access_unlock(iter->cpu_file);
4147 trace_event_read_unlock();
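/*
 * Count the entries still present in one CPU's buffer and the total written
 * to it (including overruns), accounting for any skipped_entries adjustment.
 */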
4151 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4152 unsigned long *entries, int cpu)
4154 unsigned long count;
4156 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4158 * If this buffer has skipped entries, then we hold all
4159 * entries for the trace and we need to ignore the
4160 * ones before the time stamp.
4162 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4163 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4164 /* total is the same as the entries */
4168 ring_buffer_overrun_cpu(buf->buffer, cpu);
4173 get_total_entries(struct array_buffer *buf,
4174 unsigned long *total, unsigned long *entries)
4182 for_each_tracing_cpu(cpu) {
4183 get_total_entries_cpu(buf, &t, &e, cpu);
4189 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4191 unsigned long total, entries;
4196 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4201 unsigned long trace_total_entries(struct trace_array *tr)
4203 unsigned long total, entries;
4208 get_total_entries(&tr->array_buffer, &total, &entries);
4213 static void print_lat_help_header(struct seq_file *m)
4215 seq_puts(m, "# _------=> CPU# \n"
4216 "# / _-----=> irqs-off/BH-disabled\n"
4217 "# | / _----=> need-resched \n"
4218 "# || / _---=> hardirq/softirq \n"
4219 "# ||| / _--=> preempt-depth \n"
4220 "# |||| / _-=> migrate-disable \n"
4221 "# ||||| / delay \n"
4222 "# cmd pid |||||| time | caller \n"
4223 "# \\ / |||||| \\ | / \n");
4226 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4228 unsigned long total;
4229 unsigned long entries;
4231 get_total_entries(buf, &total, &entries);
4232 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4233 entries, total, num_online_cpus());
4237 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4240 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4242 print_event_info(buf, m);
4244 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4245 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4248 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4251 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4252 const char *space = " ";
4253 int prec = tgid ? 12 : 2;
4255 print_event_info(buf, m);
4257 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4258 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4259 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4260 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4261 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4262 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4263 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4264 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4268 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4270 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4271 struct array_buffer *buf = iter->array_buffer;
4272 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4273 struct tracer *type = iter->trace;
4274 unsigned long entries;
4275 unsigned long total;
4276 const char *name = "preemption";
4280 get_total_entries(buf, &total, &entries);
4282 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4284 seq_puts(m, "# -----------------------------------"
4285 "---------------------------------\n");
4286 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4287 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4288 nsecs_to_usecs(data->saved_latency),
4292 preempt_model_none() ? "server" :
4293 preempt_model_voluntary() ? "desktop" :
4294 preempt_model_full() ? "preempt" :
4295 preempt_model_rt() ? "preempt_rt" :
4297 /* These are reserved for later use */
4300 seq_printf(m, " #P:%d)\n", num_online_cpus());
4304 seq_puts(m, "# -----------------\n");
4305 seq_printf(m, "# | task: %.16s-%d "
4306 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4307 data->comm, data->pid,
4308 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4309 data->policy, data->rt_priority);
4310 seq_puts(m, "# -----------------\n");
4312 if (data->critical_start) {
4313 seq_puts(m, "# => started at: ");
4314 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4315 trace_print_seq(m, &iter->seq);
4316 seq_puts(m, "\n# => ended at: ");
4317 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4318 trace_print_seq(m, &iter->seq);
4319 seq_puts(m, "\n#\n");
4325 static void test_cpu_buff_start(struct trace_iterator *iter)
4327 struct trace_seq *s = &iter->seq;
4328 struct trace_array *tr = iter->tr;
4330 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4333 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4336 if (cpumask_available(iter->started) &&
4337 cpumask_test_cpu(iter->cpu, iter->started))
4340 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4343 if (cpumask_available(iter->started))
4344 cpumask_set_cpu(iter->cpu, iter->started);
4346 /* Don't print started cpu buffer for the first entry of the trace */
4348 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4352 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4354 struct trace_array *tr = iter->tr;
4355 struct trace_seq *s = &iter->seq;
4356 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4357 struct trace_entry *entry;
4358 struct trace_event *event;
4362 test_cpu_buff_start(iter);
4364 event = ftrace_find_event(entry->type);
4366 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4367 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4368 trace_print_lat_context(iter);
4370 trace_print_context(iter);
4373 if (trace_seq_has_overflowed(s))
4374 return TRACE_TYPE_PARTIAL_LINE;
4377 return event->funcs->trace(iter, sym_flags, event);
4379 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4381 return trace_handle_return(s);
4384 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4386 struct trace_array *tr = iter->tr;
4387 struct trace_seq *s = &iter->seq;
4388 struct trace_entry *entry;
4389 struct trace_event *event;
4393 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4394 trace_seq_printf(s, "%d %d %llu ",
4395 entry->pid, iter->cpu, iter->ts);
4397 if (trace_seq_has_overflowed(s))
4398 return TRACE_TYPE_PARTIAL_LINE;
4400 event = ftrace_find_event(entry->type);
4402 return event->funcs->raw(iter, 0, event);
4404 trace_seq_printf(s, "%d ?\n", entry->type);
4406 return trace_handle_return(s);
4409 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4411 struct trace_array *tr = iter->tr;
4412 struct trace_seq *s = &iter->seq;
4413 unsigned char newline = '\n';
4414 struct trace_entry *entry;
4415 struct trace_event *event;
4419 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4420 SEQ_PUT_HEX_FIELD(s, entry->pid);
4421 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4422 SEQ_PUT_HEX_FIELD(s, iter->ts);
4423 if (trace_seq_has_overflowed(s))
4424 return TRACE_TYPE_PARTIAL_LINE;
4427 event = ftrace_find_event(entry->type);
4429 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4430 if (ret != TRACE_TYPE_HANDLED)
4434 SEQ_PUT_FIELD(s, newline);
4436 return trace_handle_return(s);
4439 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4441 struct trace_array *tr = iter->tr;
4442 struct trace_seq *s = &iter->seq;
4443 struct trace_entry *entry;
4444 struct trace_event *event;
4448 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4449 SEQ_PUT_FIELD(s, entry->pid);
4450 SEQ_PUT_FIELD(s, iter->cpu);
4451 SEQ_PUT_FIELD(s, iter->ts);
4452 if (trace_seq_has_overflowed(s))
4453 return TRACE_TYPE_PARTIAL_LINE;
4456 event = ftrace_find_event(entry->type);
4457 return event ? event->funcs->binary(iter, 0, event) :
4461 int trace_empty(struct trace_iterator *iter)
4463 struct ring_buffer_iter *buf_iter;
4466 /* If we are looking at one CPU buffer, only check that one */
4467 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4468 cpu = iter->cpu_file;
4469 buf_iter = trace_buffer_iter(iter, cpu);
4471 if (!ring_buffer_iter_empty(buf_iter))
4474 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4480 for_each_tracing_cpu(cpu) {
4481 buf_iter = trace_buffer_iter(iter, cpu);
4483 if (!ring_buffer_iter_empty(buf_iter))
4486 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4494 /* Called with trace_event_read_lock() held. */
4495 enum print_line_t print_trace_line(struct trace_iterator *iter)
4497 struct trace_array *tr = iter->tr;
4498 unsigned long trace_flags = tr->trace_flags;
4499 enum print_line_t ret;
4501 if (iter->lost_events) {
4502 if (iter->lost_events == (unsigned long)-1)
4503 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4506 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4507 iter->cpu, iter->lost_events);
4508 if (trace_seq_has_overflowed(&iter->seq))
4509 return TRACE_TYPE_PARTIAL_LINE;
4512 if (iter->trace && iter->trace->print_line) {
4513 ret = iter->trace->print_line(iter);
4514 if (ret != TRACE_TYPE_UNHANDLED)
4518 if (iter->ent->type == TRACE_BPUTS &&
4519 trace_flags & TRACE_ITER_PRINTK &&
4520 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4521 return trace_print_bputs_msg_only(iter);
4523 if (iter->ent->type == TRACE_BPRINT &&
4524 trace_flags & TRACE_ITER_PRINTK &&
4525 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4526 return trace_print_bprintk_msg_only(iter);
4528 if (iter->ent->type == TRACE_PRINT &&
4529 trace_flags & TRACE_ITER_PRINTK &&
4530 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4531 return trace_print_printk_msg_only(iter);
4533 if (trace_flags & TRACE_ITER_BIN)
4534 return print_bin_fmt(iter);
4536 if (trace_flags & TRACE_ITER_HEX)
4537 return print_hex_fmt(iter);
4539 if (trace_flags & TRACE_ITER_RAW)
4540 return print_raw_fmt(iter);
4542 return print_trace_fmt(iter);
4545 void trace_latency_header(struct seq_file *m)
4547 struct trace_iterator *iter = m->private;
4548 struct trace_array *tr = iter->tr;
4550 /* print nothing if the buffers are empty */
4551 if (trace_empty(iter))
4554 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4555 print_trace_header(m, iter);
4557 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4558 print_lat_help_header(m);
4561 void trace_default_header(struct seq_file *m)
4563 struct trace_iterator *iter = m->private;
4564 struct trace_array *tr = iter->tr;
4565 unsigned long trace_flags = tr->trace_flags;
4567 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4570 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4571 /* print nothing if the buffers are empty */
4572 if (trace_empty(iter))
4574 print_trace_header(m, iter);
4575 if (!(trace_flags & TRACE_ITER_VERBOSE))
4576 print_lat_help_header(m);
4578 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4579 if (trace_flags & TRACE_ITER_IRQ_INFO)
4580 print_func_help_header_irq(iter->array_buffer,
4583 print_func_help_header(iter->array_buffer, m,
4589 static void test_ftrace_alive(struct seq_file *m)
4591 if (!ftrace_is_dead())
4593 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4594 "# MAY BE MISSING FUNCTION EVENTS\n");
4597 #ifdef CONFIG_TRACER_MAX_TRACE
4598 static void show_snapshot_main_help(struct seq_file *m)
4600 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4601 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4602 "# Takes a snapshot of the main buffer.\n"
4603 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4604 "# (Doesn't have to be '2' works with any number that\n"
4605 "# is not a '0' or '1')\n");
4608 static void show_snapshot_percpu_help(struct seq_file *m)
4610 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4611 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4612 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4613 "# Takes a snapshot of the main buffer for this cpu.\n");
4615 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4616 "# Must use main snapshot file to allocate.\n");
4618 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4619 "# (Doesn't have to be '2' works with any number that\n"
4620 "# is not a '0' or '1')\n");
4623 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4625 if (iter->tr->allocated_snapshot)
4626 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4628 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4630 seq_puts(m, "# Snapshot commands:\n");
4631 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4632 show_snapshot_main_help(m);
4634 show_snapshot_percpu_help(m);
4637 /* Should never be called */
4638 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
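/*
 * seq_file ->show() for the trace file: prints the header block for the
 * first record, flushes any leftover seq data, and otherwise formats the
 * current entry via print_trace_line().
 */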
4641 static int s_show(struct seq_file *m, void *v)
4643 struct trace_iterator *iter = v;
4646 if (iter->ent == NULL) {
4648 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4650 test_ftrace_alive(m);
4652 if (iter->snapshot && trace_empty(iter))
4653 print_snapshot_help(m, iter);
4654 else if (iter->trace && iter->trace->print_header)
4655 iter->trace->print_header(m);
4657 trace_default_header(m);
4659 } else if (iter->leftover) {
4661 * If we filled the seq_file buffer earlier, we
4662 * want to just show it now.
4664 ret = trace_print_seq(m, &iter->seq);
4666 /* ret should this time be zero, but you never know */
4667 iter->leftover = ret;
4670 print_trace_line(iter);
4671 ret = trace_print_seq(m, &iter->seq);
4673 * If we overflow the seq_file buffer, then it will
4674 * ask us for this data again at start up.
4676 * ret is 0 if seq_file write succeeded.
4679 iter->leftover = ret;
4686 * Should be used after trace_array_get(), trace_types_lock
4687 * ensures that i_cdev was already initialized.
4689 static inline int tracing_get_cpu(struct inode *inode)
4691 if (inode->i_cdev) /* See trace_create_cpu_file() */
4692 return (long)inode->i_cdev - 1;
4693 return RING_BUFFER_ALL_CPUS;
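/*
 * The seq_file operations below and __tracing_open() build the iterator used
 * to read the "trace" file, optionally pausing tracing while it is dumped.
 */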
4696 static const struct seq_operations tracer_seq_ops = {
4703 static struct trace_iterator *
4704 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4706 struct trace_array *tr = inode->i_private;
4707 struct trace_iterator *iter;
4710 if (tracing_disabled)
4711 return ERR_PTR(-ENODEV);
4713 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4715 return ERR_PTR(-ENOMEM);
4717 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4719 if (!iter->buffer_iter)
4723 * trace_find_next_entry() may need to save off iter->ent.
4724 * It will place it into the iter->temp buffer. As most
4725 * events are less than 128, allocate a buffer of that size.
4726 * If one is greater, then trace_find_next_entry() will
4727 * allocate a new buffer to adjust for the bigger iter->ent.
4728 * It's not critical if it fails to get allocated here.
4730 iter->temp = kmalloc(128, GFP_KERNEL);
4732 iter->temp_size = 128;
4735 * trace_event_printf() may need to modify given format
4736 * string to replace %p with %px so that it shows real address
4737 * instead of hash value. However, that is only for the event
4738 * tracing; other tracers may not need it. Defer the allocation
4739 * until it is needed.
4745 * We make a copy of the current tracer to avoid concurrent
4746 * changes on it while we are reading.
4748 mutex_lock(&trace_types_lock);
4749 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4753 *iter->trace = *tr->current_trace;
4755 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4760 #ifdef CONFIG_TRACER_MAX_TRACE
4761 /* Currently only the top directory has a snapshot */
4762 if (tr->current_trace->print_max || snapshot)
4763 iter->array_buffer = &tr->max_buffer;
4766 iter->array_buffer = &tr->array_buffer;
4767 iter->snapshot = snapshot;
4769 iter->cpu_file = tracing_get_cpu(inode);
4770 mutex_init(&iter->mutex);
4772 /* Notify the tracer early; before we stop tracing. */
4773 if (iter->trace->open)
4774 iter->trace->open(iter);
4776 /* Annotate start of buffers if we had overruns */
4777 if (ring_buffer_overruns(iter->array_buffer->buffer))
4778 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4780 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4781 if (trace_clocks[tr->clock_id].in_ns)
4782 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4785 * If pause-on-trace is enabled, then stop the trace while
4786 * dumping, unless this is the "snapshot" file
4788 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4789 tracing_stop_tr(tr);
4791 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4792 for_each_tracing_cpu(cpu) {
4793 iter->buffer_iter[cpu] =
4794 ring_buffer_read_prepare(iter->array_buffer->buffer,
4797 ring_buffer_read_prepare_sync();
4798 for_each_tracing_cpu(cpu) {
4799 ring_buffer_read_start(iter->buffer_iter[cpu]);
4800 tracing_iter_reset(iter, cpu);
4803 cpu = iter->cpu_file;
4804 iter->buffer_iter[cpu] =
4805 ring_buffer_read_prepare(iter->array_buffer->buffer,
4807 ring_buffer_read_prepare_sync();
4808 ring_buffer_read_start(iter->buffer_iter[cpu]);
4809 tracing_iter_reset(iter, cpu);
4812 mutex_unlock(&trace_types_lock);
4817 mutex_unlock(&trace_types_lock);
4820 kfree(iter->buffer_iter);
4822 seq_release_private(inode, file);
4823 return ERR_PTR(-ENOMEM);
4826 int tracing_open_generic(struct inode *inode, struct file *filp)
4830 ret = tracing_check_open_get_tr(NULL);
4834 filp->private_data = inode->i_private;
4838 bool tracing_is_disabled(void)
4840 return (tracing_disabled) ? true : false;
4844 * Open and update trace_array ref count.
4845 * Must have the current trace_array passed to it.
4847 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4849 struct trace_array *tr = inode->i_private;
4852 ret = tracing_check_open_get_tr(tr);
4856 filp->private_data = inode->i_private;
4861 static int tracing_mark_open(struct inode *inode, struct file *filp)
4863 stream_open(inode, filp);
4864 return tracing_open_generic_tr(inode, filp);
4867 static int tracing_release(struct inode *inode, struct file *file)
4869 struct trace_array *tr = inode->i_private;
4870 struct seq_file *m = file->private_data;
4871 struct trace_iterator *iter;
4874 if (!(file->f_mode & FMODE_READ)) {
4875 trace_array_put(tr);
4879 /* Writes do not use seq_file */
4881 mutex_lock(&trace_types_lock);
4883 for_each_tracing_cpu(cpu) {
4884 if (iter->buffer_iter[cpu])
4885 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4888 if (iter->trace && iter->trace->close)
4889 iter->trace->close(iter);
4891 if (!iter->snapshot && tr->stop_count)
4892 /* reenable tracing if it was previously enabled */
4893 tracing_start_tr(tr);
4895 __trace_array_put(tr);
4897 mutex_unlock(&trace_types_lock);
4899 mutex_destroy(&iter->mutex);
4900 free_cpumask_var(iter->started);
4904 kfree(iter->buffer_iter);
4905 seq_release_private(inode, file);
4910 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4912 struct trace_array *tr = inode->i_private;
4914 trace_array_put(tr);
4918 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4920 struct trace_array *tr = inode->i_private;
4922 trace_array_put(tr);
4924 return single_release(inode, file);
4927 static int tracing_open(struct inode *inode, struct file *file)
4929 struct trace_array *tr = inode->i_private;
4930 struct trace_iterator *iter;
4933 ret = tracing_check_open_get_tr(tr);
4937 /* If this file was open for write, then erase contents */
4938 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4939 int cpu = tracing_get_cpu(inode);
4940 struct array_buffer *trace_buf = &tr->array_buffer;
4942 #ifdef CONFIG_TRACER_MAX_TRACE
4943 if (tr->current_trace->print_max)
4944 trace_buf = &tr->max_buffer;
4947 if (cpu == RING_BUFFER_ALL_CPUS)
4948 tracing_reset_online_cpus(trace_buf);
4950 tracing_reset_cpu(trace_buf, cpu);
4953 if (file->f_mode & FMODE_READ) {
4954 iter = __tracing_open(inode, file, false);
4956 ret = PTR_ERR(iter);
4957 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4958 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4962 trace_array_put(tr);
4968 * Some tracers are not suitable for instance buffers.
4969 * A tracer is always available for the global array (toplevel)
4970 * or if it explicitly states that it is.
4973 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4975 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4978 /* Find the next tracer that this trace array may use */
4979 static struct tracer *
4980 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4982 while (t && !trace_ok_for_array(t, tr))
4989 t_next(struct seq_file *m, void *v, loff_t *pos)
4991 struct trace_array *tr = m->private;
4992 struct tracer *t = v;
4997 t = get_tracer_for_array(tr, t->next);
5002 static void *t_start(struct seq_file *m, loff_t *pos)
5004 struct trace_array *tr = m->private;
5008 mutex_lock(&trace_types_lock);
5010 t = get_tracer_for_array(tr, trace_types);
5011 for (; t && l < *pos; t = t_next(m, t, &l))
5017 static void t_stop(struct seq_file *m, void *p)
5019 mutex_unlock(&trace_types_lock);
5022 static int t_show(struct seq_file *m, void *v)
5024 struct tracer *t = v;
5029 seq_puts(m, t->name);
5038 static const struct seq_operations show_traces_seq_ops = {
5045 static int show_traces_open(struct inode *inode, struct file *file)
5047 struct trace_array *tr = inode->i_private;
5051 ret = tracing_check_open_get_tr(tr);
5055 ret = seq_open(file, &show_traces_seq_ops);
5057 trace_array_put(tr);
5061 m = file->private_data;
5067 static int show_traces_release(struct inode *inode, struct file *file)
5069 struct trace_array *tr = inode->i_private;
5071 trace_array_put(tr);
5072 return seq_release(inode, file);
5076 tracing_write_stub(struct file *filp, const char __user *ubuf,
5077 size_t count, loff_t *ppos)
5082 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5086 if (file->f_mode & FMODE_READ)
5087 ret = seq_lseek(file, offset, whence);
5089 file->f_pos = ret = 0;
5094 static const struct file_operations tracing_fops = {
5095 .open = tracing_open,
5097 .write = tracing_write_stub,
5098 .llseek = tracing_lseek,
5099 .release = tracing_release,
5102 static const struct file_operations show_traces_fops = {
5103 .open = show_traces_open,
5105 .llseek = seq_lseek,
5106 .release = show_traces_release,
5110 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5111 size_t count, loff_t *ppos)
5113 struct trace_array *tr = file_inode(filp)->i_private;
5117 len = snprintf(NULL, 0, "%*pb\n",
5118 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5119 mask_str = kmalloc(len, GFP_KERNEL);
5123 len = snprintf(mask_str, len, "%*pb\n",
5124 cpumask_pr_args(tr->tracing_cpumask));
5129 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5137 int tracing_set_cpumask(struct trace_array *tr,
5138 cpumask_var_t tracing_cpumask_new)
5145 local_irq_disable();
5146 arch_spin_lock(&tr->max_lock);
5147 for_each_tracing_cpu(cpu) {
5149 * Increase/decrease the disabled counter if we are
5150 * about to flip a bit in the cpumask:
5152 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5153 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5154 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5155 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5157 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5158 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5159 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5160 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5163 arch_spin_unlock(&tr->max_lock);
5166 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5172 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5173 size_t count, loff_t *ppos)
5175 struct trace_array *tr = file_inode(filp)->i_private;
5176 cpumask_var_t tracing_cpumask_new;
5179 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5182 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5186 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5190 free_cpumask_var(tracing_cpumask_new);
5195 free_cpumask_var(tracing_cpumask_new);
5200 static const struct file_operations tracing_cpumask_fops = {
5201 .open = tracing_open_generic_tr,
5202 .read = tracing_cpumask_read,
5203 .write = tracing_cpumask_write,
5204 .release = tracing_release_generic_tr,
5205 .llseek = generic_file_llseek,
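/*
 * Illustrative use of the tracing_cpumask file served by the fops above
 * (a sketch; the mask is parsed as hex by cpumask_parse_user()):
 *
 *   # echo 3 > /sys/kernel/tracing/tracing_cpumask    (trace CPUs 0-1 only)
 *   # cat /sys/kernel/tracing/tracing_cpumask
 */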
5208 static int tracing_trace_options_show(struct seq_file *m, void *v)
5210 struct tracer_opt *trace_opts;
5211 struct trace_array *tr = m->private;
5215 mutex_lock(&trace_types_lock);
5216 tracer_flags = tr->current_trace->flags->val;
5217 trace_opts = tr->current_trace->flags->opts;
5219 for (i = 0; trace_options[i]; i++) {
5220 if (tr->trace_flags & (1 << i))
5221 seq_printf(m, "%s\n", trace_options[i]);
5223 seq_printf(m, "no%s\n", trace_options[i]);
5226 for (i = 0; trace_opts[i].name; i++) {
5227 if (tracer_flags & trace_opts[i].bit)
5228 seq_printf(m, "%s\n", trace_opts[i].name);
5230 seq_printf(m, "no%s\n", trace_opts[i].name);
5232 mutex_unlock(&trace_types_lock);
5237 static int __set_tracer_option(struct trace_array *tr,
5238 struct tracer_flags *tracer_flags,
5239 struct tracer_opt *opts, int neg)
5241 struct tracer *trace = tracer_flags->trace;
5244 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5249 tracer_flags->val &= ~opts->bit;
5251 tracer_flags->val |= opts->bit;
5255 /* Try to assign a tracer specific option */
5256 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5258 struct tracer *trace = tr->current_trace;
5259 struct tracer_flags *tracer_flags = trace->flags;
5260 struct tracer_opt *opts = NULL;
5263 for (i = 0; tracer_flags->opts[i].name; i++) {
5264 opts = &tracer_flags->opts[i];
5266 if (strcmp(cmp, opts->name) == 0)
5267 return __set_tracer_option(tr, trace->flags, opts, neg);
5273 /* Some tracers require overwrite to stay enabled */
5274 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5276 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5282 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5286 if ((mask == TRACE_ITER_RECORD_TGID) ||
5287 (mask == TRACE_ITER_RECORD_CMD))
5288 lockdep_assert_held(&event_mutex);
5290 /* do nothing if flag is already set */
5291 if (!!(tr->trace_flags & mask) == !!enabled)
5294 /* Give the tracer a chance to approve the change */
5295 if (tr->current_trace->flag_changed)
5296 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5300 tr->trace_flags |= mask;
5302 tr->trace_flags &= ~mask;
5304 if (mask == TRACE_ITER_RECORD_CMD)
5305 trace_event_enable_cmd_record(enabled);
5307 if (mask == TRACE_ITER_RECORD_TGID) {
5309 tgid_map_max = pid_max;
5310 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5314 * Pairs with smp_load_acquire() in
5315 * trace_find_tgid_ptr() to ensure that if it observes
5316 * the tgid_map we just allocated then it also observes
5317 * the corresponding tgid_map_max value.
5319 smp_store_release(&tgid_map, map);
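/*
 * Reader side of the pairing above, assuming trace_find_tgid_ptr()
 * looks roughly like this sketch (pid is the index being looked up):
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */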
5322 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5326 trace_event_enable_tgid_record(enabled);
5329 if (mask == TRACE_ITER_EVENT_FORK)
5330 trace_event_follow_fork(tr, enabled);
5332 if (mask == TRACE_ITER_FUNC_FORK)
5333 ftrace_pid_follow_fork(tr, enabled);
5335 if (mask == TRACE_ITER_OVERWRITE) {
5336 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5337 #ifdef CONFIG_TRACER_MAX_TRACE
5338 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5342 if (mask == TRACE_ITER_PRINTK) {
5343 trace_printk_start_stop_comm(enabled);
5344 trace_printk_control(enabled);
5350 int trace_set_options(struct trace_array *tr, char *option)
5355 size_t orig_len = strlen(option);
5358 cmp = strstrip(option);
5360 len = str_has_prefix(cmp, "no");
5366 mutex_lock(&event_mutex);
5367 mutex_lock(&trace_types_lock);
5369 ret = match_string(trace_options, -1, cmp);
5370 /* If no option could be set, test the specific tracer options */
5372 ret = set_tracer_option(tr, cmp, neg);
5374 ret = set_tracer_flag(tr, 1 << ret, !neg);
5376 mutex_unlock(&trace_types_lock);
5377 mutex_unlock(&event_mutex);
5380 * If the first trailing whitespace is replaced with '\0' by strstrip,
5381 * turn it back into a space.
5383 if (orig_len > strlen(option))
5384 option[strlen(option)] = ' ';
5389 static void __init apply_trace_boot_options(void)
5391 char *buf = trace_boot_options_buf;
5395 option = strsep(&buf, ",");
5401 trace_set_options(&global_trace, option);
5403 /* Put back the comma to allow this to be called again */
5410 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5411 size_t cnt, loff_t *ppos)
5413 struct seq_file *m = filp->private_data;
5414 struct trace_array *tr = m->private;
5418 if (cnt >= sizeof(buf))
5421 if (copy_from_user(buf, ubuf, cnt))
5426 ret = trace_set_options(tr, buf);
5435 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5437 struct trace_array *tr = inode->i_private;
5440 ret = tracing_check_open_get_tr(tr);
5444 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5446 trace_array_put(tr);
5451 static const struct file_operations tracing_iter_fops = {
5452 .open = tracing_trace_options_open,
5454 .llseek = seq_lseek,
5455 .release = tracing_single_release_tr,
5456 .write = tracing_trace_options_write,
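/*
 * The fops above back the "trace_options" file. A usage sketch, with
 * option names taken from the trace_options[] strings shown earlier:
 *
 *   # echo sym-offset > trace_options      (set an option)
 *   # echo nosym-offset > trace_options    (clear it again)
 */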
5459 static const char readme_msg[] =
5460 "tracing mini-HOWTO:\n\n"
5461 "# echo 0 > tracing_on : quick way to disable tracing\n"
5462 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5463 " Important files:\n"
5464 " trace\t\t\t- The static contents of the buffer\n"
5465 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5466 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5467 " current_tracer\t- function and latency tracers\n"
5468 " available_tracers\t- list of configured tracers for current_tracer\n"
5469 " error_log\t- error log for failed commands (that support it)\n"
5470 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5471 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5472 " trace_clock\t\t- change the clock used to order events\n"
5473 " local: Per cpu clock but may not be synced across CPUs\n"
5474 " global: Synced across CPUs but slows tracing down.\n"
5475 " counter: Not a clock, but just an increment\n"
5476 " uptime: Jiffy counter from time of boot\n"
5477 " perf: Same clock that perf events use\n"
5478 #ifdef CONFIG_X86_64
5479 " x86-tsc: TSC cycle counter\n"
5481 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5482 " delta: Delta difference against a buffer-wide timestamp\n"
5483 " absolute: Absolute (standalone) timestamp\n"
5484 "\n trace_marker\t\t- Writes to this file are inserted into the kernel buffer\n"
5485 "\n trace_marker_raw\t\t- Writes to this file insert binary data into the kernel buffer\n"
5486 " tracing_cpumask\t- Limit which CPUs to trace\n"
5487 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5488 "\t\t\t Remove sub-buffer with rmdir\n"
5489 " trace_options\t\t- Set format or modify how tracing happens\n"
5490 "\t\t\t Disable an option by prefixing 'no' to the\n"
5491 "\t\t\t option name\n"
5492 " saved_cmdlines_size\t- echo the number of entries to keep in the saved comm-pid list\n"
5493 #ifdef CONFIG_DYNAMIC_FTRACE
5494 "\n available_filter_functions - list of functions that can be filtered on\n"
5495 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5496 "\t\t\t functions\n"
5497 "\t accepts: func_full_name or glob-matching-pattern\n"
5498 "\t modules: Can select a group via module\n"
5499 "\t Format: :mod:<module-name>\n"
5500 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5501 "\t triggers: a command to perform when function is hit\n"
5502 "\t Format: <function>:<trigger>[:count]\n"
5503 "\t trigger: traceon, traceoff\n"
5504 "\t\t enable_event:<system>:<event>\n"
5505 "\t\t disable_event:<system>:<event>\n"
5506 #ifdef CONFIG_STACKTRACE
5509 #ifdef CONFIG_TRACER_SNAPSHOT
5514 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5515 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5516 "\t The first one will disable tracing every time do_fault is hit\n"
5517 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5518 "\t The first time do_trap is hit and it disables tracing, the\n"
5519 "\t counter will decrement to 2. If tracing is already disabled,\n"
5520 "\t the counter will not decrement. It only decrements when the\n"
5521 "\t trigger did work\n"
5522 "\t To remove trigger without count:\n"
5523 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5524 "\t To remove trigger with a count:\n"
5525 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5526 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5527 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5528 "\t modules: Can select a group via module command :mod:\n"
5529 "\t Does not accept triggers\n"
5530 #endif /* CONFIG_DYNAMIC_FTRACE */
5531 #ifdef CONFIG_FUNCTION_TRACER
5532 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5534 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5537 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5538 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5539 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5540 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5542 #ifdef CONFIG_TRACER_SNAPSHOT
5543 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5544 "\t\t\t snapshot buffer. Read the contents for more\n"
5545 "\t\t\t information\n"
5547 #ifdef CONFIG_STACK_TRACER
5548 " stack_trace\t\t- Shows the max stack trace when active\n"
5549 " stack_max_size\t- Shows current max stack size that was traced\n"
5550 "\t\t\t Write into this file to reset the max size (trigger a\n"
5551 "\t\t\t new trace)\n"
5552 #ifdef CONFIG_DYNAMIC_FTRACE
5553 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5556 #endif /* CONFIG_STACK_TRACER */
5557 #ifdef CONFIG_DYNAMIC_EVENTS
5558 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5559 "\t\t\t Write into this file to define/undefine new trace events.\n"
5561 #ifdef CONFIG_KPROBE_EVENTS
5562 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5563 "\t\t\t Write into this file to define/undefine new trace events.\n"
5565 #ifdef CONFIG_UPROBE_EVENTS
5566 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5567 "\t\t\t Write into this file to define/undefine new trace events.\n"
5569 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5570 "\t accepts: event-definitions (one definition per line)\n"
5571 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5572 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5573 #ifdef CONFIG_HIST_TRIGGERS
5574 "\t s:[synthetic/]<event> <field> [<field>]\n"
5576 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5577 "\t -:[<group>/]<event>\n"
5578 #ifdef CONFIG_KPROBE_EVENTS
5579 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5580 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5582 #ifdef CONFIG_UPROBE_EVENTS
5583 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5585 "\t args: <name>=fetcharg[:type]\n"
5586 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5587 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5588 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5590 "\t $stack<index>, $stack, $retval, $comm,\n"
5592 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5593 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5594 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5595 "\t <type>\\[<array-size>\\]\n"
5596 #ifdef CONFIG_HIST_TRIGGERS
5597 "\t field: <stype> <name>;\n"
5598 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5599 "\t [unsigned] char/int/long\n"
5601 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5602 "\t of the <attached-group>/<attached-event>.\n"
5604 " events/\t\t- Directory containing all trace event subsystems:\n"
5605 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5606 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5607 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5609 " filter\t\t- If set, only events passing filter are traced\n"
5610 " events/<system>/<event>/\t- Directory containing control files for\n"
5612 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5613 " filter\t\t- If set, only events passing filter are traced\n"
5614 " trigger\t\t- If set, a command to perform when event is hit\n"
5615 "\t Format: <trigger>[:count][if <filter>]\n"
5616 "\t trigger: traceon, traceoff\n"
5617 "\t enable_event:<system>:<event>\n"
5618 "\t disable_event:<system>:<event>\n"
5619 #ifdef CONFIG_HIST_TRIGGERS
5620 "\t enable_hist:<system>:<event>\n"
5621 "\t disable_hist:<system>:<event>\n"
5623 #ifdef CONFIG_STACKTRACE
5626 #ifdef CONFIG_TRACER_SNAPSHOT
5629 #ifdef CONFIG_HIST_TRIGGERS
5630 "\t\t hist (see below)\n"
5632 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5633 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5634 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5635 "\t events/block/block_unplug/trigger\n"
5636 "\t The first disables tracing every time block_unplug is hit.\n"
5637 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5638 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5639 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
5640 "\t Like function triggers, the counter is only decremented if it\n"
5641 "\t enabled or disabled tracing.\n"
5642 "\t To remove a trigger without a count:\n"
5643 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5644 "\t To remove a trigger with a count:\n"
5645 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5646 "\t Filters can be ignored when removing a trigger.\n"
5647 #ifdef CONFIG_HIST_TRIGGERS
5648 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5649 "\t Format: hist:keys=<field1[,field2,...]>\n"
5650 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5651 "\t [:values=<field1[,field2,...]>]\n"
5652 "\t [:sort=<field1[,field2,...]>]\n"
5653 "\t [:size=#entries]\n"
5654 "\t [:pause][:continue][:clear]\n"
5655 "\t [:name=histname1]\n"
5656 "\t [:<handler>.<action>]\n"
5657 "\t [if <filter>]\n\n"
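/*
 * A minimal usage sketch for the hist format above; it assumes the
 * sched_wakeup event and its 'pid' field.
 */
"\t example: echo 'hist:keys=pid' > events/sched/sched_wakeup/trigger\n\n"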
5658 "\t Note, special fields can be used as well:\n"
5659 "\t common_timestamp - to record current timestamp\n"
5660 "\t common_cpu - to record the CPU the event happened on\n"
5662 "\t A hist trigger variable can be:\n"
5663 "\t - a reference to a field e.g. x=current_timestamp,\n"
5664 "\t - a reference to another variable e.g. y=$x,\n"
5665 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5666 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5668 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5669 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5670 "\t variable reference, field or numeric literal.\n"
5672 "\t When a matching event is hit, an entry is added to a hash\n"
5673 "\t table using the key(s) and value(s) named, and the value of a\n"
5674 "\t sum called 'hitcount' is incremented. Keys and values\n"
5675 "\t correspond to fields in the event's format description. Keys\n"
5676 "\t can be any field, or the special string 'stacktrace'.\n"
5677 "\t Compound keys consisting of up to two fields can be specified\n"
5678 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5679 "\t fields. Sort keys consisting of up to two fields can be\n"
5680 "\t specified using the 'sort' keyword. The sort direction can\n"
5681 "\t be modified by appending '.descending' or '.ascending' to a\n"
5682 "\t sort field. The 'size' parameter can be used to specify more\n"
5683 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5684 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5685 "\t its histogram data will be shared with other triggers of the\n"
5686 "\t same name, and trigger hits will update this common data.\n\n"
5687 "\t Reading the 'hist' file for the event will dump the hash\n"
5688 "\t table in its entirety to stdout. If there are multiple hist\n"
5689 "\t triggers attached to an event, there will be a table for each\n"
5690 "\t trigger in the output. The table displayed for a named\n"
5691 "\t trigger will be the same as any other instance having the\n"
5692 "\t same name. The default format used to display a given field\n"
5693 "\t can be modified by appending any of the following modifiers\n"
5694 "\t to the field name, as applicable:\n\n"
5695 "\t .hex display a number as a hex value\n"
5696 "\t .sym display an address as a symbol\n"
5697 "\t .sym-offset display an address as a symbol and offset\n"
5698 "\t .execname display a common_pid as a program name\n"
5699 "\t .syscall display a syscall id as a syscall name\n"
5700 "\t .log2 display log2 value rather than raw number\n"
5701 "\t .buckets=size display values in groups of size rather than raw number\n"
5702 "\t .usecs display a common_timestamp in microseconds\n\n"
5703 "\t The 'pause' parameter can be used to pause an existing hist\n"
5704 "\t trigger or to start a hist trigger but not log any events\n"
5705 "\t until told to do so. 'continue' can be used to start or\n"
5706 "\t restart a paused hist trigger.\n\n"
5707 "\t The 'clear' parameter will clear the contents of a running\n"
5708 "\t hist trigger and leave its current paused/active state\n"
5710 "\t The enable_hist and disable_hist triggers can be used to\n"
5711 "\t have one event conditionally start and stop another event's\n"
5712 "\t already-attached hist trigger. The syntax is analogous to\n"
5713 "\t the enable_event and disable_event triggers.\n\n"
5714 "\t Hist trigger handlers and actions are executed whenever a\n"
5715 "\t histogram entry is added or updated. They take the form:\n\n"
5716 "\t <handler>.<action>\n\n"
5717 "\t The available handlers are:\n\n"
5718 "\t onmatch(matching.event) - invoke on addition or update\n"
5719 "\t onmax(var) - invoke if var exceeds current max\n"
5720 "\t onchange(var) - invoke action if var changes\n\n"
5721 "\t The available actions are:\n\n"
5722 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5723 "\t save(field,...) - save current event fields\n"
5724 #ifdef CONFIG_TRACER_SNAPSHOT
5725 "\t snapshot() - snapshot the trace buffer\n\n"
5727 #ifdef CONFIG_SYNTH_EVENTS
5728 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5729 "\t Write into this file to define/undefine new synthetic events.\n"
5730 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5736 tracing_readme_read(struct file *filp, char __user *ubuf,
5737 size_t cnt, loff_t *ppos)
5739 return simple_read_from_buffer(ubuf, cnt, ppos,
5740 readme_msg, strlen(readme_msg));
5743 static const struct file_operations tracing_readme_fops = {
5744 .open = tracing_open_generic,
5745 .read = tracing_readme_read,
5746 .llseek = generic_file_llseek,
5749 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5753 return trace_find_tgid_ptr(pid);
5756 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5760 return trace_find_tgid_ptr(pid);
5763 static void saved_tgids_stop(struct seq_file *m, void *v)
5767 static int saved_tgids_show(struct seq_file *m, void *v)
5769 int *entry = (int *)v;
5770 int pid = entry - tgid_map;
5776 seq_printf(m, "%d %d\n", pid, tgid);
5780 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5781 .start = saved_tgids_start,
5782 .stop = saved_tgids_stop,
5783 .next = saved_tgids_next,
5784 .show = saved_tgids_show,
5787 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5791 ret = tracing_check_open_get_tr(NULL);
5795 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5799 static const struct file_operations tracing_saved_tgids_fops = {
5800 .open = tracing_saved_tgids_open,
5802 .llseek = seq_lseek,
5803 .release = seq_release,
5806 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5808 unsigned int *ptr = v;
5810 if (*pos || m->count)
5815 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5817 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5826 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5832 arch_spin_lock(&trace_cmdline_lock);
5834 v = &savedcmd->map_cmdline_to_pid[0];
5836 v = saved_cmdlines_next(m, v, &l);
5844 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5846 arch_spin_unlock(&trace_cmdline_lock);
5850 static int saved_cmdlines_show(struct seq_file *m, void *v)
5852 char buf[TASK_COMM_LEN];
5853 unsigned int *pid = v;
5855 __trace_find_cmdline(*pid, buf);
5856 seq_printf(m, "%d %s\n", *pid, buf);
5860 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5861 .start = saved_cmdlines_start,
5862 .next = saved_cmdlines_next,
5863 .stop = saved_cmdlines_stop,
5864 .show = saved_cmdlines_show,
5867 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5871 ret = tracing_check_open_get_tr(NULL);
5875 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5878 static const struct file_operations tracing_saved_cmdlines_fops = {
5879 .open = tracing_saved_cmdlines_open,
5881 .llseek = seq_lseek,
5882 .release = seq_release,
5886 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5887 size_t cnt, loff_t *ppos)
5892 arch_spin_lock(&trace_cmdline_lock);
5893 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5894 arch_spin_unlock(&trace_cmdline_lock);
5896 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5899 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5901 kfree(s->saved_cmdlines);
5902 kfree(s->map_cmdline_to_pid);
5906 static int tracing_resize_saved_cmdlines(unsigned int val)
5908 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5910 s = kmalloc(sizeof(*s), GFP_KERNEL);
5914 if (allocate_cmdlines_buffer(val, s) < 0) {
5919 arch_spin_lock(&trace_cmdline_lock);
5920 savedcmd_temp = savedcmd;
5922 arch_spin_unlock(&trace_cmdline_lock);
5923 free_saved_cmdlines_buffer(savedcmd_temp);
5929 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5930 size_t cnt, loff_t *ppos)
5935 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5939 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5940 if (!val || val > PID_MAX_DEFAULT)
5943 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5952 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5953 .open = tracing_open_generic,
5954 .read = tracing_saved_cmdlines_size_read,
5955 .write = tracing_saved_cmdlines_size_write,
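/*
 * Usage sketch for the saved_cmdlines_size file served above:
 *
 *   # cat /sys/kernel/tracing/saved_cmdlines_size
 *   # echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * which resizes the cached comm <-> pid map via
 * tracing_resize_saved_cmdlines() above.
 */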
5958 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5959 static union trace_eval_map_item *
5960 update_eval_map(union trace_eval_map_item *ptr)
5962 if (!ptr->map.eval_string) {
5963 if (ptr->tail.next) {
5964 ptr = ptr->tail.next;
5965 /* Set ptr to the next real item (skip head) */
5973 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5975 union trace_eval_map_item *ptr = v;
5978 * Paranoid! If ptr points to end, we don't want to increment past it.
5979 * This really should never happen.
5982 ptr = update_eval_map(ptr);
5983 if (WARN_ON_ONCE(!ptr))
5987 ptr = update_eval_map(ptr);
5992 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5994 union trace_eval_map_item *v;
5997 mutex_lock(&trace_eval_mutex);
5999 v = trace_eval_maps;
6003 while (v && l < *pos) {
6004 v = eval_map_next(m, v, &l);
6010 static void eval_map_stop(struct seq_file *m, void *v)
6012 mutex_unlock(&trace_eval_mutex);
6015 static int eval_map_show(struct seq_file *m, void *v)
6017 union trace_eval_map_item *ptr = v;
6019 seq_printf(m, "%s %ld (%s)\n",
6020 ptr->map.eval_string, ptr->map.eval_value,
6026 static const struct seq_operations tracing_eval_map_seq_ops = {
6027 .start = eval_map_start,
6028 .next = eval_map_next,
6029 .stop = eval_map_stop,
6030 .show = eval_map_show,
6033 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6037 ret = tracing_check_open_get_tr(NULL);
6041 return seq_open(filp, &tracing_eval_map_seq_ops);
6044 static const struct file_operations tracing_eval_map_fops = {
6045 .open = tracing_eval_map_open,
6047 .llseek = seq_lseek,
6048 .release = seq_release,
6051 static inline union trace_eval_map_item *
6052 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6054 /* Return tail of array given the head */
6055 return ptr + ptr->head.length + 1;
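/*
 * Layout assumed by the helper above (a sketch of one chunk of
 * trace_eval_maps, allocated as len + 2 items further below):
 *
 *   [ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * so head + head.length + 1 lands on the tail item.
 */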
6059 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6062 struct trace_eval_map **stop;
6063 struct trace_eval_map **map;
6064 union trace_eval_map_item *map_array;
6065 union trace_eval_map_item *ptr;
6070 * The trace_eval_maps contains the map plus a head and tail item,
6071 * where the head holds the module and length of array, and the
6072 * tail holds a pointer to the next list.
6074 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6076 pr_warn("Unable to allocate trace eval mapping\n");
6080 mutex_lock(&trace_eval_mutex);
6082 if (!trace_eval_maps)
6083 trace_eval_maps = map_array;
6085 ptr = trace_eval_maps;
6087 ptr = trace_eval_jmp_to_tail(ptr);
6088 if (!ptr->tail.next)
6090 ptr = ptr->tail.next;
6093 ptr->tail.next = map_array;
6095 map_array->head.mod = mod;
6096 map_array->head.length = len;
6099 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6100 map_array->map = **map;
6103 memset(map_array, 0, sizeof(*map_array));
6105 mutex_unlock(&trace_eval_mutex);
6108 static void trace_create_eval_file(struct dentry *d_tracer)
6110 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6111 NULL, &tracing_eval_map_fops);
6114 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6115 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6116 static inline void trace_insert_eval_map_file(struct module *mod,
6117 struct trace_eval_map **start, int len) { }
6118 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6120 static void trace_insert_eval_map(struct module *mod,
6121 struct trace_eval_map **start, int len)
6123 struct trace_eval_map **map;
6130 trace_event_eval_update(map, len);
6132 trace_insert_eval_map_file(mod, start, len);
6136 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6137 size_t cnt, loff_t *ppos)
6139 struct trace_array *tr = filp->private_data;
6140 char buf[MAX_TRACER_SIZE+2];
6143 mutex_lock(&trace_types_lock);
6144 r = sprintf(buf, "%s\n", tr->current_trace->name);
6145 mutex_unlock(&trace_types_lock);
6147 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6150 int tracer_init(struct tracer *t, struct trace_array *tr)
6152 tracing_reset_online_cpus(&tr->array_buffer);
6156 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6160 for_each_tracing_cpu(cpu)
6161 per_cpu_ptr(buf->data, cpu)->entries = val;
6164 #ifdef CONFIG_TRACER_MAX_TRACE
6165 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6166 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6167 struct array_buffer *size_buf, int cpu_id)
6171 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6172 for_each_tracing_cpu(cpu) {
6173 ret = ring_buffer_resize(trace_buf->buffer,
6174 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6177 per_cpu_ptr(trace_buf->data, cpu)->entries =
6178 per_cpu_ptr(size_buf->data, cpu)->entries;
6181 ret = ring_buffer_resize(trace_buf->buffer,
6182 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6184 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6185 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6190 #endif /* CONFIG_TRACER_MAX_TRACE */
6192 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6193 unsigned long size, int cpu)
6198 * If the kernel or a user changes the size of the ring buffer,
6199 * we use the size that was given, and we can forget about
6200 * expanding it later.
6202 ring_buffer_expanded = true;
6204 /* May be called before buffers are initialized */
6205 if (!tr->array_buffer.buffer)
6208 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6212 #ifdef CONFIG_TRACER_MAX_TRACE
6213 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6214 !tr->current_trace->use_max_tr)
6217 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6219 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6220 &tr->array_buffer, cpu);
6223 * AARGH! We are left with different
6224 * size max buffer!!!!
6225 * The max buffer is our "snapshot" buffer.
6226 * When a tracer needs a snapshot (one of the
6227 * latency tracers), it swaps the max buffer
6228 * with the saved snapshot. We succeeded in updating
6229 * the size of the main buffer, but failed to
6230 * update the size of the max buffer. But when we tried
6231 * to reset the main buffer to the original size, we
6232 * failed there too. This is very unlikely to
6233 * happen, but if it does, warn and kill all tracing.
6237 tracing_disabled = 1;
6242 if (cpu == RING_BUFFER_ALL_CPUS)
6243 set_buffer_entries(&tr->max_buffer, size);
6245 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6248 #endif /* CONFIG_TRACER_MAX_TRACE */
6250 if (cpu == RING_BUFFER_ALL_CPUS)
6251 set_buffer_entries(&tr->array_buffer, size);
6253 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6258 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6259 unsigned long size, int cpu_id)
6263 mutex_lock(&trace_types_lock);
6265 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6266 /* make sure, this cpu is enabled in the mask */
6267 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6273 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6278 mutex_unlock(&trace_types_lock);
6285 * tracing_update_buffers - used by tracing facility to expand ring buffers
6287 * To save memory when tracing is never used on a system that has it
6288 * configured in, the ring buffers are set to a minimum size. Once
6289 * a user starts to use the tracing facility, they need to grow
6290 * to their default size.
6292 * This function is to be called when a tracer is about to be used.
6294 int tracing_update_buffers(void)
6298 mutex_lock(&trace_types_lock);
6299 if (!ring_buffer_expanded)
6300 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6301 RING_BUFFER_ALL_CPUS);
6302 mutex_unlock(&trace_types_lock);
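/*
 * A typical caller pattern (sketch only) for code that is about to
 * enable a tracer or event:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... enable the tracer/event ...
 */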
6307 struct trace_option_dentry;
6310 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6313 * Used to clear out the tracer before deletion of an instance.
6314 * Must have trace_types_lock held.
6316 static void tracing_set_nop(struct trace_array *tr)
6318 if (tr->current_trace == &nop_trace)
6321 tr->current_trace->enabled--;
6323 if (tr->current_trace->reset)
6324 tr->current_trace->reset(tr);
6326 tr->current_trace = &nop_trace;
6329 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6331 /* Only enable if the directory has been created already. */
6335 create_trace_option_files(tr, t);
6338 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6341 #ifdef CONFIG_TRACER_MAX_TRACE
6346 mutex_lock(&trace_types_lock);
6348 if (!ring_buffer_expanded) {
6349 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6350 RING_BUFFER_ALL_CPUS);
6356 for (t = trace_types; t; t = t->next) {
6357 if (strcmp(t->name, buf) == 0)
6364 if (t == tr->current_trace)
6367 #ifdef CONFIG_TRACER_SNAPSHOT
6368 if (t->use_max_tr) {
6369 arch_spin_lock(&tr->max_lock);
6370 if (tr->cond_snapshot)
6372 arch_spin_unlock(&tr->max_lock);
6377 /* Some tracers won't work on kernel command line */
6378 if (system_state < SYSTEM_RUNNING && t->noboot) {
6379 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6384 /* Some tracers are only allowed for the top level buffer */
6385 if (!trace_ok_for_array(t, tr)) {
6390 /* If trace pipe files are being read, we can't change the tracer */
6391 if (tr->trace_ref) {
6396 trace_branch_disable();
6398 tr->current_trace->enabled--;
6400 if (tr->current_trace->reset)
6401 tr->current_trace->reset(tr);
6403 /* Current trace needs to be nop_trace before synchronize_rcu */
6404 tr->current_trace = &nop_trace;
6406 #ifdef CONFIG_TRACER_MAX_TRACE
6407 had_max_tr = tr->allocated_snapshot;
6409 if (had_max_tr && !t->use_max_tr) {
6411 * We need to make sure that the update_max_tr sees that
6412 * current_trace changed to nop_trace to keep it from
6413 * swapping the buffers after we resize it.
6414 * The update_max_tr is called with interrupts disabled,
6415 * so a synchronize_rcu() is sufficient.
6422 #ifdef CONFIG_TRACER_MAX_TRACE
6423 if (t->use_max_tr && !had_max_tr) {
6424 ret = tracing_alloc_snapshot_instance(tr);
6431 ret = tracer_init(t, tr);
6436 tr->current_trace = t;
6437 tr->current_trace->enabled++;
6438 trace_branch_enable(tr);
6440 mutex_unlock(&trace_types_lock);
6446 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6447 size_t cnt, loff_t *ppos)
6449 struct trace_array *tr = filp->private_data;
6450 char buf[MAX_TRACER_SIZE+1];
6457 if (cnt > MAX_TRACER_SIZE)
6458 cnt = MAX_TRACER_SIZE;
6460 if (copy_from_user(buf, ubuf, cnt))
6465 /* strip ending whitespace. */
6466 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6469 err = tracing_set_tracer(tr, buf);
6479 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6480 size_t cnt, loff_t *ppos)
6485 r = snprintf(buf, sizeof(buf), "%ld\n",
6486 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6487 if (r > sizeof(buf))
6489 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6493 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6494 size_t cnt, loff_t *ppos)
6499 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6509 tracing_thresh_read(struct file *filp, char __user *ubuf,
6510 size_t cnt, loff_t *ppos)
6512 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6516 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6517 size_t cnt, loff_t *ppos)
6519 struct trace_array *tr = filp->private_data;
6522 mutex_lock(&trace_types_lock);
6523 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6527 if (tr->current_trace->update_thresh) {
6528 ret = tr->current_trace->update_thresh(tr);
6535 mutex_unlock(&trace_types_lock);
6540 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6543 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6544 size_t cnt, loff_t *ppos)
6546 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6550 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6551 size_t cnt, loff_t *ppos)
6553 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6558 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6560 struct trace_array *tr = inode->i_private;
6561 struct trace_iterator *iter;
6564 ret = tracing_check_open_get_tr(tr);
6568 mutex_lock(&trace_types_lock);
6570 /* create a buffer to store the information to pass to userspace */
6571 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6574 __trace_array_put(tr);
6578 trace_seq_init(&iter->seq);
6579 iter->trace = tr->current_trace;
6581 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6586 /* trace pipe does not show start of buffer */
6587 cpumask_setall(iter->started);
6589 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6590 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6592 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6593 if (trace_clocks[tr->clock_id].in_ns)
6594 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6597 iter->array_buffer = &tr->array_buffer;
6598 iter->cpu_file = tracing_get_cpu(inode);
6599 mutex_init(&iter->mutex);
6600 filp->private_data = iter;
6602 if (iter->trace->pipe_open)
6603 iter->trace->pipe_open(iter);
6605 nonseekable_open(inode, filp);
6609 mutex_unlock(&trace_types_lock);
6614 __trace_array_put(tr);
6615 mutex_unlock(&trace_types_lock);
6619 static int tracing_release_pipe(struct inode *inode, struct file *file)
6621 struct trace_iterator *iter = file->private_data;
6622 struct trace_array *tr = inode->i_private;
6624 mutex_lock(&trace_types_lock);
6628 if (iter->trace->pipe_close)
6629 iter->trace->pipe_close(iter);
6631 mutex_unlock(&trace_types_lock);
6633 free_cpumask_var(iter->started);
6634 mutex_destroy(&iter->mutex);
6637 trace_array_put(tr);
6643 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6645 struct trace_array *tr = iter->tr;
6647 /* Iterators are static, they should be filled or empty */
6648 if (trace_buffer_iter(iter, iter->cpu_file))
6649 return EPOLLIN | EPOLLRDNORM;
6651 if (tr->trace_flags & TRACE_ITER_BLOCK)
6653 * Always select as readable when in blocking mode
6655 return EPOLLIN | EPOLLRDNORM;
6657 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6662 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6664 struct trace_iterator *iter = filp->private_data;
6666 return trace_poll(iter, filp, poll_table);
6669 /* Must be called with iter->mutex held. */
6670 static int tracing_wait_pipe(struct file *filp)
6672 struct trace_iterator *iter = filp->private_data;
6675 while (trace_empty(iter)) {
6677 if ((filp->f_flags & O_NONBLOCK)) {
6682 * We block until we read something and tracing is disabled.
6683 * We still block if tracing is disabled, but we have never
6684 * read anything. This allows a user to cat this file, and
6685 * then enable tracing. But after we have read something,
6686 * we give an EOF when tracing is again disabled.
6688 * iter->pos will be 0 if we haven't read anything.
6690 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6693 mutex_unlock(&iter->mutex);
6695 ret = wait_on_pipe(iter, 0);
6697 mutex_lock(&iter->mutex);
6710 tracing_read_pipe(struct file *filp, char __user *ubuf,
6711 size_t cnt, loff_t *ppos)
6713 struct trace_iterator *iter = filp->private_data;
6717 * Avoid more than one consumer on a single file descriptor
6718 * This is just a matter of trace coherency; the ring buffer itself is protected.
6721 mutex_lock(&iter->mutex);
6723 /* return any leftover data */
6724 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6728 trace_seq_init(&iter->seq);
6730 if (iter->trace->read) {
6731 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6737 sret = tracing_wait_pipe(filp);
6741 /* stop when tracing is finished */
6742 if (trace_empty(iter)) {
6747 if (cnt >= PAGE_SIZE)
6748 cnt = PAGE_SIZE - 1;
6750 /* reset all but tr, trace, and overruns */
6751 trace_iterator_reset(iter);
6752 cpumask_clear(iter->started);
6753 trace_seq_init(&iter->seq);
6755 trace_event_read_lock();
6756 trace_access_lock(iter->cpu_file);
6757 while (trace_find_next_entry_inc(iter) != NULL) {
6758 enum print_line_t ret;
6759 int save_len = iter->seq.seq.len;
6761 ret = print_trace_line(iter);
6762 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6763 /* don't print partial lines */
6764 iter->seq.seq.len = save_len;
6767 if (ret != TRACE_TYPE_NO_CONSUME)
6768 trace_consume(iter);
6770 if (trace_seq_used(&iter->seq) >= cnt)
6774 * Setting the full flag means we reached the trace_seq buffer
6775 * size and we should have exited via the partial output condition above.
6776 * One of the trace_seq_* functions is not used properly.
6778 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6781 trace_access_unlock(iter->cpu_file);
6782 trace_event_read_unlock();
6784 /* Now copy what we have to the user */
6785 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6786 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6787 trace_seq_init(&iter->seq);
6790 * If there was nothing to send to user, in spite of consuming trace
6791 * entries, go back to wait for more entries.
6797 mutex_unlock(&iter->mutex);
6802 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6805 __free_page(spd->pages[idx]);
6809 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6815 /* Seq buffer is page-sized, exactly what we need. */
6817 save_len = iter->seq.seq.len;
6818 ret = print_trace_line(iter);
6820 if (trace_seq_has_overflowed(&iter->seq)) {
6821 iter->seq.seq.len = save_len;
6826 * This should not be hit, because it should only
6827 * be set if the iter->seq overflowed. But check it
6828 * anyway to be safe.
6830 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6831 iter->seq.seq.len = save_len;
6835 count = trace_seq_used(&iter->seq) - save_len;
6838 iter->seq.seq.len = save_len;
6842 if (ret != TRACE_TYPE_NO_CONSUME)
6843 trace_consume(iter);
6845 if (!trace_find_next_entry_inc(iter)) {
6855 static ssize_t tracing_splice_read_pipe(struct file *filp,
6857 struct pipe_inode_info *pipe,
6861 struct page *pages_def[PIPE_DEF_BUFFERS];
6862 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6863 struct trace_iterator *iter = filp->private_data;
6864 struct splice_pipe_desc spd = {
6866 .partial = partial_def,
6867 .nr_pages = 0, /* This gets updated below. */
6868 .nr_pages_max = PIPE_DEF_BUFFERS,
6869 .ops = &default_pipe_buf_ops,
6870 .spd_release = tracing_spd_release_pipe,
6876 if (splice_grow_spd(pipe, &spd))
6879 mutex_lock(&iter->mutex);
6881 if (iter->trace->splice_read) {
6882 ret = iter->trace->splice_read(iter, filp,
6883 ppos, pipe, len, flags);
6888 ret = tracing_wait_pipe(filp);
6892 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6897 trace_event_read_lock();
6898 trace_access_lock(iter->cpu_file);
6900 /* Fill as many pages as possible. */
6901 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6902 spd.pages[i] = alloc_page(GFP_KERNEL);
6906 rem = tracing_fill_pipe_page(rem, iter);
6908 /* Copy the data into the page, so we can start over. */
6909 ret = trace_seq_to_buffer(&iter->seq,
6910 page_address(spd.pages[i]),
6911 trace_seq_used(&iter->seq));
6913 __free_page(spd.pages[i]);
6916 spd.partial[i].offset = 0;
6917 spd.partial[i].len = trace_seq_used(&iter->seq);
6919 trace_seq_init(&iter->seq);
6922 trace_access_unlock(iter->cpu_file);
6923 trace_event_read_unlock();
6924 mutex_unlock(&iter->mutex);
6929 ret = splice_to_pipe(pipe, &spd);
6933 splice_shrink_spd(&spd);
6937 mutex_unlock(&iter->mutex);
6942 tracing_entries_read(struct file *filp, char __user *ubuf,
6943 size_t cnt, loff_t *ppos)
6945 struct inode *inode = file_inode(filp);
6946 struct trace_array *tr = inode->i_private;
6947 int cpu = tracing_get_cpu(inode);
6952 mutex_lock(&trace_types_lock);
6954 if (cpu == RING_BUFFER_ALL_CPUS) {
6955 int cpu, buf_size_same;
6961 /* check if all cpu sizes are the same */
6961 for_each_tracing_cpu(cpu) {
6962 /* fill in the size from first enabled cpu */
6964 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6965 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6971 if (buf_size_same) {
6972 if (!ring_buffer_expanded)
6973 r = sprintf(buf, "%lu (expanded: %lu)\n",
6975 trace_buf_size >> 10);
6977 r = sprintf(buf, "%lu\n", size >> 10);
6979 r = sprintf(buf, "X\n");
6981 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6983 mutex_unlock(&trace_types_lock);
6985 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6990 tracing_entries_write(struct file *filp, const char __user *ubuf,
6991 size_t cnt, loff_t *ppos)
6993 struct inode *inode = file_inode(filp);
6994 struct trace_array *tr = inode->i_private;
6998 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7002 /* must have at least 1 entry */
7006 /* value is in KB */
7008 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7018 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7019 size_t cnt, loff_t *ppos)
7021 struct trace_array *tr = filp->private_data;
7024 unsigned long size = 0, expanded_size = 0;
7026 mutex_lock(&trace_types_lock);
7027 for_each_tracing_cpu(cpu) {
7028 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7029 if (!ring_buffer_expanded)
7030 expanded_size += trace_buf_size >> 10;
7032 if (ring_buffer_expanded)
7033 r = sprintf(buf, "%lu\n", size);
7035 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7036 mutex_unlock(&trace_types_lock);
7038 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7042 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7043 size_t cnt, loff_t *ppos)
7046 * There is no need to read what the user has written; this function
7047 * is just to make sure that there is no error when "echo" is used
7056 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7058 struct trace_array *tr = inode->i_private;
7060 /* disable tracing ? */
7061 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7062 tracer_tracing_off(tr);
7063 /* resize the ring buffer to 0 */
7064 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7066 trace_array_put(tr);
7072 tracing_mark_write(struct file *filp, const char __user *ubuf,
7073 size_t cnt, loff_t *fpos)
7075 struct trace_array *tr = filp->private_data;
7076 struct ring_buffer_event *event;
7077 enum event_trigger_type tt = ETT_NONE;
7078 struct trace_buffer *buffer;
7079 struct print_entry *entry;
7084 /* Used in tracing_mark_raw_write() as well */
7085 #define FAULTED_STR "<faulted>"
7086 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7088 if (tracing_disabled)
7091 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7094 if (cnt > TRACE_BUF_SIZE)
7095 cnt = TRACE_BUF_SIZE;
7097 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7099 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7101 /* If less than "<faulted>", then make sure we can still add that */
7102 if (cnt < FAULTED_SIZE)
7103 size += FAULTED_SIZE - cnt;
7105 buffer = tr->array_buffer.buffer;
7106 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7108 if (unlikely(!event))
7109 /* Ring buffer disabled, return as if not open for write */
7112 entry = ring_buffer_event_data(event);
7113 entry->ip = _THIS_IP_;
7115 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7117 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7123 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7124 /* do not add \n before testing triggers, but add \0 */
7125 entry->buf[cnt] = '\0';
7126 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7129 if (entry->buf[cnt - 1] != '\n') {
7130 entry->buf[cnt] = '\n';
7131 entry->buf[cnt + 1] = '\0';
7133 entry->buf[cnt] = '\0';
7135 if (static_branch_unlikely(&trace_marker_exports_enabled))
7136 ftrace_exports(event, TRACE_EXPORT_MARKER);
7137 __buffer_unlock_commit(buffer, event);
7140 event_triggers_post_call(tr->trace_marker_file, tt);
7145 /* Limit it for now to 3K (including tag) */
7146 #define RAW_DATA_MAX_SIZE (1024*3)
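/*
 * Payload layout expected from user space by the raw marker below
 * (a sketch): the first sizeof(int) bytes are the tag id, the rest is
 * opaque binary data, all written in a single write() to
 * trace_marker_raw, e.g.
 *
 *	struct { unsigned int id; char data[]; };
 */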
7149 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7150 size_t cnt, loff_t *fpos)
7152 struct trace_array *tr = filp->private_data;
7153 struct ring_buffer_event *event;
7154 struct trace_buffer *buffer;
7155 struct raw_data_entry *entry;
7160 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7162 if (tracing_disabled)
7165 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7168 /* The marker must at least have a tag id */
7169 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7172 if (cnt > TRACE_BUF_SIZE)
7173 cnt = TRACE_BUF_SIZE;
7175 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7177 size = sizeof(*entry) + cnt;
7178 if (cnt < FAULT_SIZE_ID)
7179 size += FAULT_SIZE_ID - cnt;
7181 buffer = tr->array_buffer.buffer;
7182 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7185 /* Ring buffer disabled, return as if not open for write */
7188 entry = ring_buffer_event_data(event);
7190 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7193 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7198 __buffer_unlock_commit(buffer, event);
7203 static int tracing_clock_show(struct seq_file *m, void *v)
7205 struct trace_array *tr = m->private;
7208 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7210 "%s%s%s%s", i ? " " : "",
7211 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7212 i == tr->clock_id ? "]" : "");
7218 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7222 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7223 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7226 if (i == ARRAY_SIZE(trace_clocks))
7229 mutex_lock(&trace_types_lock);
7233 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7236 * The new clock may not be consistent with the previous clock.
7237 * Reset the buffer so that it doesn't have incomparable timestamps.
7239 tracing_reset_online_cpus(&tr->array_buffer);
7241 #ifdef CONFIG_TRACER_MAX_TRACE
7242 if (tr->max_buffer.buffer)
7243 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7244 tracing_reset_online_cpus(&tr->max_buffer);
7247 mutex_unlock(&trace_types_lock);
7252 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7253 size_t cnt, loff_t *fpos)
7255 struct seq_file *m = filp->private_data;
7256 struct trace_array *tr = m->private;
7258 const char *clockstr;
7261 if (cnt >= sizeof(buf))
7264 if (copy_from_user(buf, ubuf, cnt))
7269 clockstr = strstrip(buf);
7271 ret = tracing_set_clock(tr, clockstr);
7280 static int tracing_clock_open(struct inode *inode, struct file *file)
7282 struct trace_array *tr = inode->i_private;
7285 ret = tracing_check_open_get_tr(tr);
7289 ret = single_open(file, tracing_clock_show, inode->i_private);
7291 trace_array_put(tr);
7296 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7298 struct trace_array *tr = m->private;
7300 mutex_lock(&trace_types_lock);
7302 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7303 seq_puts(m, "delta [absolute]\n");
7305 seq_puts(m, "[delta] absolute\n");
7307 mutex_unlock(&trace_types_lock);
7312 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7314 struct trace_array *tr = inode->i_private;
7317 ret = tracing_check_open_get_tr(tr);
7321 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7323 trace_array_put(tr);
7328 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7330 if (rbe == this_cpu_read(trace_buffered_event))
7331 return ring_buffer_time_stamp(buffer);
7333 return ring_buffer_event_time_stamp(buffer, rbe);
7337 * Set or disable using the per CPU trace_buffer_event when possible.
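 *
 * (Editorial sketch, inferred from the code below rather than a stated
 * contract: a caller that needs unbuffered events passes set == true,
 * which takes a reference in tr->no_filter_buffering_ref; passing
 * set == false drops that reference again, so calls are expected to be
 * balanced.)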
7339 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7343 mutex_lock(&trace_types_lock);
7345 if (set && tr->no_filter_buffering_ref++)
7349 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7354 --tr->no_filter_buffering_ref;
7357 mutex_unlock(&trace_types_lock);
7362 struct ftrace_buffer_info {
7363 struct trace_iterator iter;
7365 unsigned int spare_cpu;
7369 #ifdef CONFIG_TRACER_SNAPSHOT
7370 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7372 struct trace_array *tr = inode->i_private;
7373 struct trace_iterator *iter;
7377 ret = tracing_check_open_get_tr(tr);
7381 if (file->f_mode & FMODE_READ) {
7382 iter = __tracing_open(inode, file, true);
7384 ret = PTR_ERR(iter);
7386 /* Writes still need the seq_file to hold the private data */
7388 m = kzalloc(sizeof(*m), GFP_KERNEL);
7391 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7399 iter->array_buffer = &tr->max_buffer;
7400 iter->cpu_file = tracing_get_cpu(inode);
7402 file->private_data = m;
7406 trace_array_put(tr);
7412 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7415 struct seq_file *m = filp->private_data;
7416 struct trace_iterator *iter = m->private;
7417 struct trace_array *tr = iter->tr;
7421 ret = tracing_update_buffers();
7425 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7429 mutex_lock(&trace_types_lock);
7431 if (tr->current_trace->use_max_tr) {
7436 arch_spin_lock(&tr->max_lock);
7437 if (tr->cond_snapshot)
7439 arch_spin_unlock(&tr->max_lock);
7445 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7449 if (tr->allocated_snapshot)
7453 /* Only allow per-cpu swap if the ring buffer supports it */
7454 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7455 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7460 if (tr->allocated_snapshot)
7461 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7462 &tr->array_buffer, iter->cpu_file);
7464 ret = tracing_alloc_snapshot_instance(tr);
7467 local_irq_disable();
7468 /* Now, we're going to swap */
7469 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7470 update_max_tr(tr, current, smp_processor_id(), NULL);
7472 update_max_tr_single(tr, current, iter->cpu_file);
7476 if (tr->allocated_snapshot) {
7477 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7478 tracing_reset_online_cpus(&tr->max_buffer);
7480 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7490 mutex_unlock(&trace_types_lock);
7494 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7496 struct seq_file *m = file->private_data;
7499 ret = tracing_release(inode, file);
7501 if (file->f_mode & FMODE_READ)
7504 /* If write only, the seq_file is just a stub */
7512 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7513 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7514 size_t count, loff_t *ppos);
7515 static int tracing_buffers_release(struct inode *inode, struct file *file);
7516 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7517 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7519 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7521 struct ftrace_buffer_info *info;
7524 /* The following checks for tracefs lockdown */
7525 ret = tracing_buffers_open(inode, filp);
7529 info = filp->private_data;
7531 if (info->iter.trace->use_max_tr) {
7532 tracing_buffers_release(inode, filp);
7536 info->iter.snapshot = true;
7537 info->iter.array_buffer = &info->iter.tr->max_buffer;
7542 #endif /* CONFIG_TRACER_SNAPSHOT */
7545 static const struct file_operations tracing_thresh_fops = {
7546 .open = tracing_open_generic,
7547 .read = tracing_thresh_read,
7548 .write = tracing_thresh_write,
7549 .llseek = generic_file_llseek,
7552 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7553 static const struct file_operations tracing_max_lat_fops = {
7554 .open = tracing_open_generic,
7555 .read = tracing_max_lat_read,
7556 .write = tracing_max_lat_write,
7557 .llseek = generic_file_llseek,
7561 static const struct file_operations set_tracer_fops = {
7562 .open = tracing_open_generic,
7563 .read = tracing_set_trace_read,
7564 .write = tracing_set_trace_write,
7565 .llseek = generic_file_llseek,
7568 static const struct file_operations tracing_pipe_fops = {
7569 .open = tracing_open_pipe,
7570 .poll = tracing_poll_pipe,
7571 .read = tracing_read_pipe,
7572 .splice_read = tracing_splice_read_pipe,
7573 .release = tracing_release_pipe,
7574 .llseek = no_llseek,
7577 static const struct file_operations tracing_entries_fops = {
7578 .open = tracing_open_generic_tr,
7579 .read = tracing_entries_read,
7580 .write = tracing_entries_write,
7581 .llseek = generic_file_llseek,
7582 .release = tracing_release_generic_tr,
7585 static const struct file_operations tracing_total_entries_fops = {
7586 .open = tracing_open_generic_tr,
7587 .read = tracing_total_entries_read,
7588 .llseek = generic_file_llseek,
7589 .release = tracing_release_generic_tr,
7592 static const struct file_operations tracing_free_buffer_fops = {
7593 .open = tracing_open_generic_tr,
7594 .write = tracing_free_buffer_write,
7595 .release = tracing_free_buffer_release,
7598 static const struct file_operations tracing_mark_fops = {
7599 .open = tracing_mark_open,
7600 .write = tracing_mark_write,
7601 .release = tracing_release_generic_tr,
7604 static const struct file_operations tracing_mark_raw_fops = {
7605 .open = tracing_mark_open,
7606 .write = tracing_mark_raw_write,
7607 .release = tracing_release_generic_tr,
7610 static const struct file_operations trace_clock_fops = {
7611 .open = tracing_clock_open,
7613 .llseek = seq_lseek,
7614 .release = tracing_single_release_tr,
7615 .write = tracing_clock_write,
7618 static const struct file_operations trace_time_stamp_mode_fops = {
7619 .open = tracing_time_stamp_mode_open,
7621 .llseek = seq_lseek,
7622 .release = tracing_single_release_tr,
7625 #ifdef CONFIG_TRACER_SNAPSHOT
7626 static const struct file_operations snapshot_fops = {
7627 .open = tracing_snapshot_open,
7629 .write = tracing_snapshot_write,
7630 .llseek = tracing_lseek,
7631 .release = tracing_snapshot_release,
7634 static const struct file_operations snapshot_raw_fops = {
7635 .open = snapshot_raw_open,
7636 .read = tracing_buffers_read,
7637 .release = tracing_buffers_release,
7638 .splice_read = tracing_buffers_splice_read,
7639 .llseek = no_llseek,
7642 #endif /* CONFIG_TRACER_SNAPSHOT */
7645 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7646 * @filp: The active open file structure
7647 * @ubuf: The user space buffer holding the new value
7648 * @cnt: The maximum number of bytes to read from @ubuf
7649 * @ppos: The current "file" position
7651 * This function implements the write interface for a struct trace_min_max_param.
7652 * The filp->private_data must point to a trace_min_max_param structure that
7653 * defines where to write the value, the min and the max acceptable values,
7654 * and a lock to protect the write.
7657 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7659 struct trace_min_max_param *param = filp->private_data;
7666 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7671 mutex_lock(param->lock);
7673 if (param->min && val < *param->min)
7676 if (param->max && val > *param->max)
7683 mutex_unlock(param->lock);
7692 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7693 * @filp: The active open file structure
7694 * @ubuf: The userspace provided buffer to read value into
7695 * @cnt: The maximum number of bytes to read
7696 * @ppos: The current "file" position
7698 * This function implements the read interface for a struct trace_min_max_param.
7699 * The filp->private_data must point to a trace_min_max_param struct with valid
7703 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7705 struct trace_min_max_param *param = filp->private_data;
7706 char buf[U64_STR_SIZE];
7715 if (cnt > sizeof(buf))
7718 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7720 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7723 const struct file_operations trace_min_max_fops = {
7724 .open = tracing_open_generic,
7725 .read = trace_min_max_read,
7726 .write = trace_min_max_write,
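/*
 * Editorial sketch (not from this file): a user of trace_min_max_fops is
 * expected to pass a struct trace_min_max_param as the file's data, along
 * these lines (the my_* names are illustrative only):
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */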
7729 #define TRACING_LOG_ERRS_MAX 8
7730 #define TRACING_LOG_LOC_MAX 128
7732 #define CMD_PREFIX " Command: "
7735 const char **errs; /* ptr to loc-specific array of err strings */
7736 u8 type; /* index into errs -> specific err string */
7737 u16 pos; /* caret position */
7741 struct tracing_log_err {
7742 struct list_head list;
7743 struct err_info info;
7744 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7745 char *cmd; /* what caused err */
7748 static DEFINE_MUTEX(tracing_err_log_lock);
7750 static struct tracing_log_err *alloc_tracing_log_err(int len)
7752 struct tracing_log_err *err;
7754 err = kzalloc(sizeof(*err), GFP_KERNEL);
7756 return ERR_PTR(-ENOMEM);
7758 err->cmd = kzalloc(len, GFP_KERNEL);
7761 return ERR_PTR(-ENOMEM);
7767 static void free_tracing_log_err(struct tracing_log_err *err)
7773 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7776 struct tracing_log_err *err;
7778 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7779 err = alloc_tracing_log_err(len);
7780 if (PTR_ERR(err) != -ENOMEM)
7781 tr->n_err_log_entries++;
7786 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7788 err->cmd = kzalloc(len, GFP_KERNEL);
7790 return ERR_PTR(-ENOMEM);
7791 list_del(&err->list);
7797 * err_pos - find the position of a string within a command for error careting
7798 * @cmd: The tracing command that caused the error
7799 * @str: The string to position the caret at within @cmd
7801 * Finds the position of the first occurrence of @str within @cmd. The
7802 * return value can be passed to tracing_log_err() for caret placement
7805 * Returns the index within @cmd of the first occurrence of @str or 0
7806 * if @str was not found.
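 *
 * For example, err_pos("hist:keys=foo", "foo") returns 10, the offset of
 * "foo" within the command.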
7808 unsigned int err_pos(char *cmd, const char *str)
7812 if (WARN_ON(!strlen(cmd)))
7815 found = strstr(cmd, str);
7823 * tracing_log_err - write an error to the tracing error log
7824 * @tr: The associated trace array for the error (NULL for top level array)
7825 * @loc: A string describing where the error occurred
7826 * @cmd: The tracing command that caused the error
7827 * @errs: The array of loc-specific static error strings
7828 * @type: The index into errs[], which produces the specific static err string
7829 * @pos: The position the caret should be placed in the cmd
7831 * Writes an error into tracing/error_log of the form:
7833 * <loc>: error: <text>
7837 * tracing/error_log is a small log file containing the last
7838 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7839 * unless there has been a tracing error, and the error log can be
7840 * cleared and have its memory freed by writing the empty string in
7841 * truncation mode to it i.e. echo > tracing/error_log.
7843 * NOTE: the @errs array along with the @type param are used to
7844 * produce a static error string - this string is not copied and saved
7845 * when the error is logged - only a pointer to it is saved. See
7846 * existing callers for examples of how static strings are typically
7847 * defined for use with tracing_log_err().
7849 void tracing_log_err(struct trace_array *tr,
7850 const char *loc, const char *cmd,
7851 const char **errs, u8 type, u16 pos)
7853 struct tracing_log_err *err;
7859 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7861 mutex_lock(&tracing_err_log_lock);
7862 err = get_tracing_log_err(tr, len);
7863 if (PTR_ERR(err) == -ENOMEM) {
7864 mutex_unlock(&tracing_err_log_lock);
7868 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7869 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7871 err->info.errs = errs;
7872 err->info.type = type;
7873 err->info.pos = pos;
7874 err->info.ts = local_clock();
7876 list_add_tail(&err->list, &tr->err_log);
7877 mutex_unlock(&tracing_err_log_lock);
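/*
 * Editorial example of how the pieces above fit together (the names are
 * illustrative, not from this file):
 *
 *	static const char *my_errs[] = { "Bad argument", "Out of range" };
 *
 *	tracing_log_err(tr, "my_cmd", cmd, my_errs, 0, err_pos(cmd, arg));
 *
 * would append something like the following to tracing/error_log:
 *
 *	[   12.345678] my_cmd: error: Bad argument
 *	  Command: <the failing command line>
 *	           ^
 */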
7880 static void clear_tracing_err_log(struct trace_array *tr)
7882 struct tracing_log_err *err, *next;
7884 mutex_lock(&tracing_err_log_lock);
7885 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7886 list_del(&err->list);
7887 free_tracing_log_err(err);
7890 tr->n_err_log_entries = 0;
7891 mutex_unlock(&tracing_err_log_lock);
7894 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7896 struct trace_array *tr = m->private;
7898 mutex_lock(&tracing_err_log_lock);
7900 return seq_list_start(&tr->err_log, *pos);
7903 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7905 struct trace_array *tr = m->private;
7907 return seq_list_next(v, &tr->err_log, pos);
7910 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7912 mutex_unlock(&tracing_err_log_lock);
7915 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7919 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7921 for (i = 0; i < pos; i++)
7926 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7928 struct tracing_log_err *err = v;
7931 const char *err_text = err->info.errs[err->info.type];
7932 u64 sec = err->info.ts;
7935 nsec = do_div(sec, NSEC_PER_SEC);
7936 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7937 err->loc, err_text);
7938 seq_printf(m, "%s", err->cmd);
7939 tracing_err_log_show_pos(m, err->info.pos);
7945 static const struct seq_operations tracing_err_log_seq_ops = {
7946 .start = tracing_err_log_seq_start,
7947 .next = tracing_err_log_seq_next,
7948 .stop = tracing_err_log_seq_stop,
7949 .show = tracing_err_log_seq_show
7952 static int tracing_err_log_open(struct inode *inode, struct file *file)
7954 struct trace_array *tr = inode->i_private;
7957 ret = tracing_check_open_get_tr(tr);
7961 /* If this file was opened for write, then erase contents */
7962 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7963 clear_tracing_err_log(tr);
7965 if (file->f_mode & FMODE_READ) {
7966 ret = seq_open(file, &tracing_err_log_seq_ops);
7968 struct seq_file *m = file->private_data;
7971 trace_array_put(tr);
7977 static ssize_t tracing_err_log_write(struct file *file,
7978 const char __user *buffer,
7979 size_t count, loff_t *ppos)
7984 static int tracing_err_log_release(struct inode *inode, struct file *file)
7986 struct trace_array *tr = inode->i_private;
7988 trace_array_put(tr);
7990 if (file->f_mode & FMODE_READ)
7991 seq_release(inode, file);
7996 static const struct file_operations tracing_err_log_fops = {
7997 .open = tracing_err_log_open,
7998 .write = tracing_err_log_write,
8000 .llseek = seq_lseek,
8001 .release = tracing_err_log_release,
8004 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8006 struct trace_array *tr = inode->i_private;
8007 struct ftrace_buffer_info *info;
8010 ret = tracing_check_open_get_tr(tr);
8014 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8016 trace_array_put(tr);
8020 mutex_lock(&trace_types_lock);
8023 info->iter.cpu_file = tracing_get_cpu(inode);
8024 info->iter.trace = tr->current_trace;
8025 info->iter.array_buffer = &tr->array_buffer;
8027 /* Force reading ring buffer for first read */
8028 info->read = (unsigned int)-1;
8030 filp->private_data = info;
8034 mutex_unlock(&trace_types_lock);
8036 ret = nonseekable_open(inode, filp);
8038 trace_array_put(tr);
8044 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8046 struct ftrace_buffer_info *info = filp->private_data;
8047 struct trace_iterator *iter = &info->iter;
8049 return trace_poll(iter, filp, poll_table);
8053 tracing_buffers_read(struct file *filp, char __user *ubuf,
8054 size_t count, loff_t *ppos)
8056 struct ftrace_buffer_info *info = filp->private_data;
8057 struct trace_iterator *iter = &info->iter;
8064 #ifdef CONFIG_TRACER_MAX_TRACE
8065 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8070 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8072 if (IS_ERR(info->spare)) {
8073 ret = PTR_ERR(info->spare);
8076 info->spare_cpu = iter->cpu_file;
8082 /* Do we have previous read data to read? */
8083 if (info->read < PAGE_SIZE)
8087 trace_access_lock(iter->cpu_file);
8088 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8092 trace_access_unlock(iter->cpu_file);
8095 if (trace_empty(iter)) {
8096 if ((filp->f_flags & O_NONBLOCK))
8099 ret = wait_on_pipe(iter, 0);
8110 size = PAGE_SIZE - info->read;
8114 ret = copy_to_user(ubuf, info->spare + info->read, size);
8126 static int tracing_buffers_release(struct inode *inode, struct file *file)
8128 struct ftrace_buffer_info *info = file->private_data;
8129 struct trace_iterator *iter = &info->iter;
8131 mutex_lock(&trace_types_lock);
8133 iter->tr->trace_ref--;
8135 __trace_array_put(iter->tr);
8138 ring_buffer_free_read_page(iter->array_buffer->buffer,
8139 info->spare_cpu, info->spare);
8142 mutex_unlock(&trace_types_lock);
8148 struct trace_buffer *buffer;
8151 refcount_t refcount;
8154 static void buffer_ref_release(struct buffer_ref *ref)
8156 if (!refcount_dec_and_test(&ref->refcount))
8158 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8162 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8163 struct pipe_buffer *buf)
8165 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8167 buffer_ref_release(ref);
8171 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8172 struct pipe_buffer *buf)
8174 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8176 if (refcount_read(&ref->refcount) > INT_MAX/2)
8179 refcount_inc(&ref->refcount);
8183 /* Pipe buffer operations for a buffer. */
8184 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8185 .release = buffer_pipe_buf_release,
8186 .get = buffer_pipe_buf_get,
8190 * Callback from splice_to_pipe(), used to release pages
8191 * at the end of the spd if we errored out while filling the pipe.
8193 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8195 struct buffer_ref *ref =
8196 (struct buffer_ref *)spd->partial[i].private;
8198 buffer_ref_release(ref);
8199 spd->partial[i].private = 0;
8203 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8204 struct pipe_inode_info *pipe, size_t len,
8207 struct ftrace_buffer_info *info = file->private_data;
8208 struct trace_iterator *iter = &info->iter;
8209 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8210 struct page *pages_def[PIPE_DEF_BUFFERS];
8211 struct splice_pipe_desc spd = {
8213 .partial = partial_def,
8214 .nr_pages_max = PIPE_DEF_BUFFERS,
8215 .ops = &buffer_pipe_buf_ops,
8216 .spd_release = buffer_spd_release,
8218 struct buffer_ref *ref;
8222 #ifdef CONFIG_TRACER_MAX_TRACE
8223 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8227 if (*ppos & (PAGE_SIZE - 1))
8230 if (len & (PAGE_SIZE - 1)) {
8231 if (len < PAGE_SIZE)
8236 if (splice_grow_spd(pipe, &spd))
8240 trace_access_lock(iter->cpu_file);
8241 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8243 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8247 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8253 refcount_set(&ref->refcount, 1);
8254 ref->buffer = iter->array_buffer->buffer;
8255 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8256 if (IS_ERR(ref->page)) {
8257 ret = PTR_ERR(ref->page);
8262 ref->cpu = iter->cpu_file;
8264 r = ring_buffer_read_page(ref->buffer, &ref->page,
8265 len, iter->cpu_file, 1);
8267 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8273 page = virt_to_page(ref->page);
8275 spd.pages[i] = page;
8276 spd.partial[i].len = PAGE_SIZE;
8277 spd.partial[i].offset = 0;
8278 spd.partial[i].private = (unsigned long)ref;
8282 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8285 trace_access_unlock(iter->cpu_file);
8288 /* did we read anything? */
8289 if (!spd.nr_pages) {
8294 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8297 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8304 ret = splice_to_pipe(pipe, &spd);
8306 splice_shrink_spd(&spd);
8311 static const struct file_operations tracing_buffers_fops = {
8312 .open = tracing_buffers_open,
8313 .read = tracing_buffers_read,
8314 .poll = tracing_buffers_poll,
8315 .release = tracing_buffers_release,
8316 .splice_read = tracing_buffers_splice_read,
8317 .llseek = no_llseek,
8321 tracing_stats_read(struct file *filp, char __user *ubuf,
8322 size_t count, loff_t *ppos)
8324 struct inode *inode = file_inode(filp);
8325 struct trace_array *tr = inode->i_private;
8326 struct array_buffer *trace_buf = &tr->array_buffer;
8327 int cpu = tracing_get_cpu(inode);
8328 struct trace_seq *s;
8330 unsigned long long t;
8331 unsigned long usec_rem;
8333 s = kmalloc(sizeof(*s), GFP_KERNEL);
8339 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8340 trace_seq_printf(s, "entries: %ld\n", cnt);
8342 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8343 trace_seq_printf(s, "overrun: %ld\n", cnt);
8345 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8346 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8348 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8349 trace_seq_printf(s, "bytes: %ld\n", cnt);
8351 if (trace_clocks[tr->clock_id].in_ns) {
8352 /* local or global for trace_clock */
8353 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8354 usec_rem = do_div(t, USEC_PER_SEC);
8355 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8358 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8359 usec_rem = do_div(t, USEC_PER_SEC);
8360 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8362 /* counter or tsc mode for trace_clock */
8363 trace_seq_printf(s, "oldest event ts: %llu\n",
8364 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8366 trace_seq_printf(s, "now ts: %llu\n",
8367 ring_buffer_time_stamp(trace_buf->buffer));
8370 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8371 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8373 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8374 trace_seq_printf(s, "read events: %ld\n", cnt);
8376 count = simple_read_from_buffer(ubuf, count, ppos,
8377 s->buffer, trace_seq_used(s));
8384 static const struct file_operations tracing_stats_fops = {
8385 .open = tracing_open_generic_tr,
8386 .read = tracing_stats_read,
8387 .llseek = generic_file_llseek,
8388 .release = tracing_release_generic_tr,
8391 #ifdef CONFIG_DYNAMIC_FTRACE
8394 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8395 size_t cnt, loff_t *ppos)
8401 /* 256 should be plenty to hold the amount needed */
8402 buf = kmalloc(256, GFP_KERNEL);
8406 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8407 ftrace_update_tot_cnt,
8408 ftrace_number_of_pages,
8409 ftrace_number_of_groups);
8411 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8416 static const struct file_operations tracing_dyn_info_fops = {
8417 .open = tracing_open_generic,
8418 .read = tracing_read_dyn_info,
8419 .llseek = generic_file_llseek,
8421 #endif /* CONFIG_DYNAMIC_FTRACE */
8423 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8425 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8426 struct trace_array *tr, struct ftrace_probe_ops *ops,
8429 tracing_snapshot_instance(tr);
8433 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8434 struct trace_array *tr, struct ftrace_probe_ops *ops,
8437 struct ftrace_func_mapper *mapper = data;
8441 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8451 tracing_snapshot_instance(tr);
8455 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8456 struct ftrace_probe_ops *ops, void *data)
8458 struct ftrace_func_mapper *mapper = data;
8461 seq_printf(m, "%ps:", (void *)ip);
8463 seq_puts(m, "snapshot");
8466 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8469 seq_printf(m, ":count=%ld\n", *count);
8471 seq_puts(m, ":unlimited\n");
8477 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8478 unsigned long ip, void *init_data, void **data)
8480 struct ftrace_func_mapper *mapper = *data;
8483 mapper = allocate_ftrace_func_mapper();
8489 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8493 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8494 unsigned long ip, void *data)
8496 struct ftrace_func_mapper *mapper = data;
8501 free_ftrace_func_mapper(mapper, NULL);
8505 ftrace_func_mapper_remove_ip(mapper, ip);
8508 static struct ftrace_probe_ops snapshot_probe_ops = {
8509 .func = ftrace_snapshot,
8510 .print = ftrace_snapshot_print,
8513 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8514 .func = ftrace_count_snapshot,
8515 .print = ftrace_snapshot_print,
8516 .init = ftrace_snapshot_init,
8517 .free = ftrace_snapshot_free,
8521 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8522 char *glob, char *cmd, char *param, int enable)
8524 struct ftrace_probe_ops *ops;
8525 void *count = (void *)-1;
8532 /* hash funcs only work with set_ftrace_filter */
8536 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8539 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8544 number = strsep(¶m, ":");
8546 if (!strlen(number))
8550 * We use the callback data field (which is a pointer)
8553 ret = kstrtoul(number, 0, (unsigned long *)&count);
8558 ret = tracing_alloc_snapshot_instance(tr);
8562 ret = register_ftrace_function_probe(glob, tr, ops, count);
8565 return ret < 0 ? ret : 0;
8568 static struct ftrace_func_command ftrace_snapshot_cmd = {
8570 .func = ftrace_trace_snapshot_callback,
8573 static __init int register_snapshot_cmd(void)
8575 return register_ftrace_command(&ftrace_snapshot_cmd);
8578 static inline __init int register_snapshot_cmd(void) { return 0; }
8579 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8581 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8583 if (WARN_ON(!tr->dir))
8584 return ERR_PTR(-ENODEV);
8586 /* Top directory uses NULL as the parent */
8587 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8590 /* All sub buffers have a descriptor */
8594 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8596 struct dentry *d_tracer;
8599 return tr->percpu_dir;
8601 d_tracer = tracing_get_dentry(tr);
8602 if (IS_ERR(d_tracer))
8605 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8607 MEM_FAIL(!tr->percpu_dir,
8608 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8610 return tr->percpu_dir;
8613 static struct dentry *
8614 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8615 void *data, long cpu, const struct file_operations *fops)
8617 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8619 if (ret) /* See tracing_get_cpu() */
8620 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8625 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8627 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8628 struct dentry *d_cpu;
8629 char cpu_dir[30]; /* 30 characters should be more than enough */
8634 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8635 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8637 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8641 /* per cpu trace_pipe */
8642 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8643 tr, cpu, &tracing_pipe_fops);
8646 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8647 tr, cpu, &tracing_fops);
8649 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8650 tr, cpu, &tracing_buffers_fops);
8652 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8653 tr, cpu, &tracing_stats_fops);
8655 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8656 tr, cpu, &tracing_entries_fops);
8658 #ifdef CONFIG_TRACER_SNAPSHOT
8659 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8660 tr, cpu, &snapshot_fops);
8662 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8663 tr, cpu, &snapshot_raw_fops);
8667 #ifdef CONFIG_FTRACE_SELFTEST
8668 /* Let selftest have access to static functions in this file */
8669 #include "trace_selftest.c"
8673 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8676 struct trace_option_dentry *topt = filp->private_data;
8679 if (topt->flags->val & topt->opt->bit)
8684 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8688 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8691 struct trace_option_dentry *topt = filp->private_data;
8695 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8699 if (val != 0 && val != 1)
8702 if (!!(topt->flags->val & topt->opt->bit) != val) {
8703 mutex_lock(&trace_types_lock);
8704 ret = __set_tracer_option(topt->tr, topt->flags,
8706 mutex_unlock(&trace_types_lock);
8717 static const struct file_operations trace_options_fops = {
8718 .open = tracing_open_generic,
8719 .read = trace_options_read,
8720 .write = trace_options_write,
8721 .llseek = generic_file_llseek,
8725 * In order to pass in both the trace_array descriptor as well as the index
8726 * to the flag that the trace option file represents, the trace_array
8727 * has a character array of trace_flags_index[], which holds the index
8728 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8729 * The address of this character array is passed to the flag option file
8730 * read/write callbacks.
8732 * In order to extract both the index and the trace_array descriptor,
8733 * get_tr_index() uses the following algorithm.
8737 * As the pointer itself contains the address of the index (remember
8740 * Then to get the trace_array descriptor, by subtracting that index
8741 * from the ptr, we get to the start of the index itself.
8743 * ptr - idx == &index[0]
8745 * Then a simple container_of() from that pointer gets us to the
8746 * trace_array descriptor.
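 *
 * A worked example (editorial): if data points at &tr->trace_flags_index[3],
 * then *pindex == 3 and data - 3 == &tr->trace_flags_index[0], from which
 * container_of() recovers tr itself.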
8748 static void get_tr_index(void *data, struct trace_array **ptr,
8749 unsigned int *pindex)
8751 *pindex = *(unsigned char *)data;
8753 *ptr = container_of(data - *pindex, struct trace_array,
8758 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8761 void *tr_index = filp->private_data;
8762 struct trace_array *tr;
8766 get_tr_index(tr_index, &tr, &index);
8768 if (tr->trace_flags & (1 << index))
8773 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8777 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8780 void *tr_index = filp->private_data;
8781 struct trace_array *tr;
8786 get_tr_index(tr_index, &tr, &index);
8788 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8792 if (val != 0 && val != 1)
8795 mutex_lock(&event_mutex);
8796 mutex_lock(&trace_types_lock);
8797 ret = set_tracer_flag(tr, 1 << index, val);
8798 mutex_unlock(&trace_types_lock);
8799 mutex_unlock(&event_mutex);
8809 static const struct file_operations trace_options_core_fops = {
8810 .open = tracing_open_generic,
8811 .read = trace_options_core_read,
8812 .write = trace_options_core_write,
8813 .llseek = generic_file_llseek,
8816 struct dentry *trace_create_file(const char *name,
8818 struct dentry *parent,
8820 const struct file_operations *fops)
8824 ret = tracefs_create_file(name, mode, parent, data, fops);
8826 pr_warn("Could not create tracefs '%s' entry\n", name);
8832 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8834 struct dentry *d_tracer;
8839 d_tracer = tracing_get_dentry(tr);
8840 if (IS_ERR(d_tracer))
8843 tr->options = tracefs_create_dir("options", d_tracer);
8845 pr_warn("Could not create tracefs directory 'options'\n");
8853 create_trace_option_file(struct trace_array *tr,
8854 struct trace_option_dentry *topt,
8855 struct tracer_flags *flags,
8856 struct tracer_opt *opt)
8858 struct dentry *t_options;
8860 t_options = trace_options_init_dentry(tr);
8864 topt->flags = flags;
8868 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8869 t_options, topt, &trace_options_fops);
8874 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8876 struct trace_option_dentry *topts;
8877 struct trace_options *tr_topts;
8878 struct tracer_flags *flags;
8879 struct tracer_opt *opts;
8886 flags = tracer->flags;
8888 if (!flags || !flags->opts)
8892 * If this is an instance, only create flags for tracers
8893 * the instance may have.
8895 if (!trace_ok_for_array(tracer, tr))
8898 for (i = 0; i < tr->nr_topts; i++) {
8899 /* Make sure there's no duplicate flags. */
8900 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8906 for (cnt = 0; opts[cnt].name; cnt++)
8909 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8913 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8920 tr->topts = tr_topts;
8921 tr->topts[tr->nr_topts].tracer = tracer;
8922 tr->topts[tr->nr_topts].topts = topts;
8925 for (cnt = 0; opts[cnt].name; cnt++) {
8926 create_trace_option_file(tr, &topts[cnt], flags,
8928 MEM_FAIL(topts[cnt].entry == NULL,
8929 "Failed to create trace option: %s",
8934 static struct dentry *
8935 create_trace_option_core_file(struct trace_array *tr,
8936 const char *option, long index)
8938 struct dentry *t_options;
8940 t_options = trace_options_init_dentry(tr);
8944 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8945 (void *)&tr->trace_flags_index[index],
8946 &trace_options_core_fops);
8949 static void create_trace_options_dir(struct trace_array *tr)
8951 struct dentry *t_options;
8952 bool top_level = tr == &global_trace;
8955 t_options = trace_options_init_dentry(tr);
8959 for (i = 0; trace_options[i]; i++) {
8961 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8962 create_trace_option_core_file(tr, trace_options[i], i);
8967 rb_simple_read(struct file *filp, char __user *ubuf,
8968 size_t cnt, loff_t *ppos)
8970 struct trace_array *tr = filp->private_data;
8974 r = tracer_tracing_is_on(tr);
8975 r = sprintf(buf, "%d\n", r);
8977 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8981 rb_simple_write(struct file *filp, const char __user *ubuf,
8982 size_t cnt, loff_t *ppos)
8984 struct trace_array *tr = filp->private_data;
8985 struct trace_buffer *buffer = tr->array_buffer.buffer;
8989 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8994 mutex_lock(&trace_types_lock);
8995 if (!!val == tracer_tracing_is_on(tr)) {
8996 val = 0; /* do nothing */
8998 tracer_tracing_on(tr);
8999 if (tr->current_trace->start)
9000 tr->current_trace->start(tr);
9002 tracer_tracing_off(tr);
9003 if (tr->current_trace->stop)
9004 tr->current_trace->stop(tr);
9006 mutex_unlock(&trace_types_lock);
9014 static const struct file_operations rb_simple_fops = {
9015 .open = tracing_open_generic_tr,
9016 .read = rb_simple_read,
9017 .write = rb_simple_write,
9018 .release = tracing_release_generic_tr,
9019 .llseek = default_llseek,
9023 buffer_percent_read(struct file *filp, char __user *ubuf,
9024 size_t cnt, loff_t *ppos)
9026 struct trace_array *tr = filp->private_data;
9030 r = tr->buffer_percent;
9031 r = sprintf(buf, "%d\n", r);
9033 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9037 buffer_percent_write(struct file *filp, const char __user *ubuf,
9038 size_t cnt, loff_t *ppos)
9040 struct trace_array *tr = filp->private_data;
9044 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9054 tr->buffer_percent = val;
9061 static const struct file_operations buffer_percent_fops = {
9062 .open = tracing_open_generic_tr,
9063 .read = buffer_percent_read,
9064 .write = buffer_percent_write,
9065 .release = tracing_release_generic_tr,
9066 .llseek = default_llseek,
9069 static struct dentry *trace_instance_dir;
9072 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9075 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9077 enum ring_buffer_flags rb_flags;
9079 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9083 buf->buffer = ring_buffer_alloc(size, rb_flags);
9087 buf->data = alloc_percpu(struct trace_array_cpu);
9089 ring_buffer_free(buf->buffer);
9094 /* Allocate the first page for all buffers */
9095 set_buffer_entries(&tr->array_buffer,
9096 ring_buffer_size(tr->array_buffer.buffer, 0));
9101 static int allocate_trace_buffers(struct trace_array *tr, int size)
9105 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9109 #ifdef CONFIG_TRACER_MAX_TRACE
9110 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9111 allocate_snapshot ? size : 1);
9112 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9113 ring_buffer_free(tr->array_buffer.buffer);
9114 tr->array_buffer.buffer = NULL;
9115 free_percpu(tr->array_buffer.data);
9116 tr->array_buffer.data = NULL;
9119 tr->allocated_snapshot = allocate_snapshot;
9122 * Only the top level trace array gets its snapshot allocated
9123 * from the kernel command line.
9125 allocate_snapshot = false;
9131 static void free_trace_buffer(struct array_buffer *buf)
9134 ring_buffer_free(buf->buffer);
9136 free_percpu(buf->data);
9141 static void free_trace_buffers(struct trace_array *tr)
9146 free_trace_buffer(&tr->array_buffer);
9148 #ifdef CONFIG_TRACER_MAX_TRACE
9149 free_trace_buffer(&tr->max_buffer);
9153 static void init_trace_flags_index(struct trace_array *tr)
9157 /* Used by the trace options files */
9158 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9159 tr->trace_flags_index[i] = i;
9162 static void __update_tracer_options(struct trace_array *tr)
9166 for (t = trace_types; t; t = t->next)
9167 add_tracer_options(tr, t);
9170 static void update_tracer_options(struct trace_array *tr)
9172 mutex_lock(&trace_types_lock);
9173 __update_tracer_options(tr);
9174 mutex_unlock(&trace_types_lock);
9177 /* Must have trace_types_lock held */
9178 struct trace_array *trace_array_find(const char *instance)
9180 struct trace_array *tr, *found = NULL;
9182 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9183 if (tr->name && strcmp(tr->name, instance) == 0) {
9192 struct trace_array *trace_array_find_get(const char *instance)
9194 struct trace_array *tr;
9196 mutex_lock(&trace_types_lock);
9197 tr = trace_array_find(instance);
9200 mutex_unlock(&trace_types_lock);
9205 static int trace_array_create_dir(struct trace_array *tr)
9209 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9213 ret = event_trace_add_tracer(tr->dir, tr);
9215 tracefs_remove(tr->dir);
9219 init_tracer_tracefs(tr, tr->dir);
9220 __update_tracer_options(tr);
9225 static struct trace_array *trace_array_create(const char *name)
9227 struct trace_array *tr;
9231 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9233 return ERR_PTR(ret);
9235 tr->name = kstrdup(name, GFP_KERNEL);
9239 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9242 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9244 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9246 raw_spin_lock_init(&tr->start_lock);
9248 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9250 tr->current_trace = &nop_trace;
9252 INIT_LIST_HEAD(&tr->systems);
9253 INIT_LIST_HEAD(&tr->events);
9254 INIT_LIST_HEAD(&tr->hist_vars);
9255 INIT_LIST_HEAD(&tr->err_log);
9257 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9260 if (ftrace_allocate_ftrace_ops(tr) < 0)
9263 ftrace_init_trace_array(tr);
9265 init_trace_flags_index(tr);
9267 if (trace_instance_dir) {
9268 ret = trace_array_create_dir(tr);
9272 __trace_early_add_events(tr);
9274 list_add(&tr->list, &ftrace_trace_arrays);
9281 ftrace_free_ftrace_ops(tr);
9282 free_trace_buffers(tr);
9283 free_cpumask_var(tr->tracing_cpumask);
9287 return ERR_PTR(ret);
9290 static int instance_mkdir(const char *name)
9292 struct trace_array *tr;
9295 mutex_lock(&event_mutex);
9296 mutex_lock(&trace_types_lock);
9299 if (trace_array_find(name))
9302 tr = trace_array_create(name);
9304 ret = PTR_ERR_OR_ZERO(tr);
9307 mutex_unlock(&trace_types_lock);
9308 mutex_unlock(&event_mutex);
9313 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9314 * @name: The name of the trace array to be looked up/created.
9316 * Returns pointer to trace array with given name.
9317 * NULL, if it cannot be created.
9319 * NOTE: This function increments the reference counter associated with the
9320 * trace array returned. This makes sure it cannot be freed while in use.
9321 * Use trace_array_put() once the trace array is no longer needed.
9322 * If the trace_array is to be freed, trace_array_destroy() needs to
9323 * be called after the trace_array_put(), or simply let user space delete
9324 * it from the tracefs instances directory. But until the
9325 * trace_array_put() is called, user space cannot delete it.
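 *
 * Illustrative usage from kernel code (editorial sketch; do_work_with()
 * is a hypothetical caller-side helper):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		do_work_with(tr);
 *		trace_array_put(tr);
 *	}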
9328 struct trace_array *trace_array_get_by_name(const char *name)
9330 struct trace_array *tr;
9332 mutex_lock(&event_mutex);
9333 mutex_lock(&trace_types_lock);
9335 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9336 if (tr->name && strcmp(tr->name, name) == 0)
9340 tr = trace_array_create(name);
9348 mutex_unlock(&trace_types_lock);
9349 mutex_unlock(&event_mutex);
9352 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9354 static int __remove_instance(struct trace_array *tr)
9358 /* Reference counter for a newly created trace array = 1. */
9359 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9362 list_del(&tr->list);
9364 /* Disable all the flags that were enabled coming in */
9365 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9366 if ((1 << i) & ZEROED_TRACE_FLAGS)
9367 set_tracer_flag(tr, 1 << i, 0);
9370 tracing_set_nop(tr);
9371 clear_ftrace_function_probes(tr);
9372 event_trace_del_tracer(tr);
9373 ftrace_clear_pids(tr);
9374 ftrace_destroy_function_files(tr);
9375 tracefs_remove(tr->dir);
9376 free_percpu(tr->last_func_repeats);
9377 free_trace_buffers(tr);
9379 for (i = 0; i < tr->nr_topts; i++) {
9380 kfree(tr->topts[i].topts);
9384 free_cpumask_var(tr->tracing_cpumask);
9391 int trace_array_destroy(struct trace_array *this_tr)
9393 struct trace_array *tr;
9399 mutex_lock(&event_mutex);
9400 mutex_lock(&trace_types_lock);
9404 /* Making sure trace array exists before destroying it. */
9405 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9406 if (tr == this_tr) {
9407 ret = __remove_instance(tr);
9412 mutex_unlock(&trace_types_lock);
9413 mutex_unlock(&event_mutex);
9417 EXPORT_SYMBOL_GPL(trace_array_destroy);
9419 static int instance_rmdir(const char *name)
9421 struct trace_array *tr;
9424 mutex_lock(&event_mutex);
9425 mutex_lock(&trace_types_lock);
9428 tr = trace_array_find(name);
9430 ret = __remove_instance(tr);
9432 mutex_unlock(&trace_types_lock);
9433 mutex_unlock(&event_mutex);
9438 static __init void create_trace_instances(struct dentry *d_tracer)
9440 struct trace_array *tr;
9442 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9445 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9448 mutex_lock(&event_mutex);
9449 mutex_lock(&trace_types_lock);
9451 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9454 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9455 "Failed to create instance directory\n"))
9459 mutex_unlock(&trace_types_lock);
9460 mutex_unlock(&event_mutex);
9464 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9466 struct trace_event_file *file;
9469 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9470 tr, &show_traces_fops);
9472 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9473 tr, &set_tracer_fops);
9475 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9476 tr, &tracing_cpumask_fops);
9478 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9479 tr, &tracing_iter_fops);
9481 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9484 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9485 tr, &tracing_pipe_fops);
9487 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9488 tr, &tracing_entries_fops);
9490 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9491 tr, &tracing_total_entries_fops);
9493 trace_create_file("free_buffer", 0200, d_tracer,
9494 tr, &tracing_free_buffer_fops);
9496 trace_create_file("trace_marker", 0220, d_tracer,
9497 tr, &tracing_mark_fops);
9499 file = __find_event_file(tr, "ftrace", "print");
9500 if (file && file->dir)
9501 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9502 file, &event_trigger_fops);
9503 tr->trace_marker_file = file;
9505 trace_create_file("trace_marker_raw", 0220, d_tracer,
9506 tr, &tracing_mark_raw_fops);
9508 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9511 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9512 tr, &rb_simple_fops);
9514 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9515 &trace_time_stamp_mode_fops);
9517 tr->buffer_percent = 50;
9519 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9520 tr, &buffer_percent_fops);
9522 create_trace_options_dir(tr);
9524 trace_create_maxlat_file(tr, d_tracer);
9526 if (ftrace_create_function_files(tr, d_tracer))
9527 MEM_FAIL(1, "Could not allocate function filter files");
9529 #ifdef CONFIG_TRACER_SNAPSHOT
9530 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9531 tr, &snapshot_fops);
9534 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9535 tr, &tracing_err_log_fops);
9537 for_each_tracing_cpu(cpu)
9538 tracing_init_tracefs_percpu(tr, cpu);
9540 ftrace_init_tracefs(tr, d_tracer);
9543 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
9545 struct vfsmount *mnt;
9546 struct file_system_type *type;
9549 * To maintain backward compatibility for tools that mount
9550 * debugfs to get to the tracing facility, tracefs is automatically
9551 * mounted to the debugfs/tracing directory.
9553 type = get_fs_type("tracefs");
9556 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9557 put_filesystem(type);
9566 * tracing_init_dentry - initialize top level trace array
9568 * This is called when creating files or directories in the tracing
9569 * directory. It is called via fs_initcall() by any of the boot up code
9570 * and returns 0 on success or a negative error code on failure.
9572 int tracing_init_dentry(void)
9574 struct trace_array *tr = &global_trace;
9576 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9577 pr_warn("Tracing disabled due to lockdown\n");
9581 /* The top level trace array uses NULL as parent */
9585 if (WARN_ON(!tracefs_initialized()))
9589 * As there may still be users that expect the tracing
9590 * files to exist in debugfs/tracing, we must automount
9591 * the tracefs file system there, so older tools still
9592 * work with the newer kernel.
9594 tr->dir = debugfs_create_automount("tracing", NULL,
9595 trace_automount, NULL);
9600 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9601 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9603 static struct workqueue_struct *eval_map_wq __initdata;
9604 static struct work_struct eval_map_work __initdata;
9606 static void __init eval_map_work_func(struct work_struct *work)
9610 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9611 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9614 static int __init trace_eval_init(void)
9616 INIT_WORK(&eval_map_work, eval_map_work_func);
9618 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9620 pr_err("Unable to allocate eval_map_wq\n");
9622 eval_map_work_func(&eval_map_work);
9626 queue_work(eval_map_wq, &eval_map_work);
9630 static int __init trace_eval_sync(void)
9632 /* Make sure the eval map updates are finished */
9634 destroy_workqueue(eval_map_wq);
9638 late_initcall_sync(trace_eval_sync);
9641 #ifdef CONFIG_MODULES
9642 static void trace_module_add_evals(struct module *mod)
9644 if (!mod->num_trace_evals)
9648 * Modules with bad taint do not have events created; do
9649 * not bother with their enums either.
9651 if (trace_module_has_bad_taint(mod))
9654 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9657 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9658 static void trace_module_remove_evals(struct module *mod)
9660 union trace_eval_map_item *map;
9661 union trace_eval_map_item **last = &trace_eval_maps;
9663 if (!mod->num_trace_evals)
9666 mutex_lock(&trace_eval_mutex);
9668 map = trace_eval_maps;
9671 if (map->head.mod == mod)
9673 map = trace_eval_jmp_to_tail(map);
9674 last = &map->tail.next;
9675 map = map->tail.next;
9680 *last = trace_eval_jmp_to_tail(map)->tail.next;
9683 mutex_unlock(&trace_eval_mutex);
9686 static inline void trace_module_remove_evals(struct module *mod) { }
9687 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9689 static int trace_module_notify(struct notifier_block *self,
9690 unsigned long val, void *data)
9692 struct module *mod = data;
9695 case MODULE_STATE_COMING:
9696 trace_module_add_evals(mod);
9698 case MODULE_STATE_GOING:
9699 trace_module_remove_evals(mod);
9706 static struct notifier_block trace_module_nb = {
9707 .notifier_call = trace_module_notify,
9710 #endif /* CONFIG_MODULES */
9712 static __init int tracer_init_tracefs(void)
9716 trace_access_lock_init();
9718 ret = tracing_init_dentry();
9724 init_tracer_tracefs(&global_trace, NULL);
9725 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9727 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9728 &global_trace, &tracing_thresh_fops);
9730 trace_create_file("README", TRACE_MODE_READ, NULL,
9731 NULL, &tracing_readme_fops);
9733 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9734 NULL, &tracing_saved_cmdlines_fops);
9736 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9737 NULL, &tracing_saved_cmdlines_size_fops);
9739 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9740 NULL, &tracing_saved_tgids_fops);
9744 trace_create_eval_file(NULL);
9746 #ifdef CONFIG_MODULES
9747 register_module_notifier(&trace_module_nb);
9750 #ifdef CONFIG_DYNAMIC_FTRACE
9751 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9752 NULL, &tracing_dyn_info_fops);
9755 create_trace_instances(NULL);
9757 update_tracer_options(&global_trace);
9762 fs_initcall(tracer_init_tracefs);
9764 static int trace_panic_handler(struct notifier_block *this,
9765 unsigned long event, void *unused)
9767 if (ftrace_dump_on_oops)
9768 ftrace_dump(ftrace_dump_on_oops);
9772 static struct notifier_block trace_panic_notifier = {
9773 .notifier_call = trace_panic_handler,
9775 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9778 static int trace_die_handler(struct notifier_block *self,
9784 if (ftrace_dump_on_oops)
9785 ftrace_dump(ftrace_dump_on_oops);
9793 static struct notifier_block trace_die_notifier = {
9794 .notifier_call = trace_die_handler,
9799 * printk is set to a max of 1024; we really don't need it that big.
9800 * Nothing should be printing 1000 characters anyway.
9802 #define TRACE_MAX_PRINT 1000
9805 * Define here KERN_TRACE so that we have one place to modify
9806 * it if we decide to change what log level the ftrace dump
9809 #define KERN_TRACE KERN_EMERG
9812 trace_printk_seq(struct trace_seq *s)
9814 /* Probably should print a warning here. */
9815 if (s->seq.len >= TRACE_MAX_PRINT)
9816 s->seq.len = TRACE_MAX_PRINT;
9819 * More paranoid code. Although the buffer size is set to
9820 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9821 * an extra layer of protection.
9823 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9824 s->seq.len = s->seq.size - 1;
9826 /* should be zero ended, but we are paranoid. */
9827 s->buffer[s->seq.len] = 0;
9829 printk(KERN_TRACE "%s", s->buffer);
9834 void trace_init_global_iter(struct trace_iterator *iter)
9836 iter->tr = &global_trace;
9837 iter->trace = iter->tr->current_trace;
9838 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9839 iter->array_buffer = &global_trace.array_buffer;
9841 if (iter->trace && iter->trace->open)
9842 iter->trace->open(iter);
9844 /* Annotate start of buffers if we had overruns */
9845 if (ring_buffer_overruns(iter->array_buffer->buffer))
9846 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9848 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9849 if (trace_clocks[iter->tr->clock_id].in_ns)
9850 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9853 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9855 /* use static because iter can be a bit big for the stack */
9856 static struct trace_iterator iter;
9857 static atomic_t dump_running;
9858 struct trace_array *tr = &global_trace;
9859 unsigned int old_userobj;
9860 unsigned long flags;
9863 /* Only allow one dump user at a time. */
9864 if (atomic_inc_return(&dump_running) != 1) {
9865 atomic_dec(&dump_running);
9870 * Always turn off tracing when we dump.
9871 * We don't need to show trace output of what happens
9872 * between multiple crashes.
9874 * If the user does a sysrq-z, then they can re-enable
9875 * tracing with echo 1 > tracing_on.
9879 local_irq_save(flags);
9881 /* Simulate the iterator */
9882 trace_init_global_iter(&iter);
9883 /* Can not use kmalloc for iter.temp and iter.fmt */
9884 iter.temp = static_temp_buf;
9885 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9886 iter.fmt = static_fmt_buf;
9887 iter.fmt_size = STATIC_FMT_BUF_SIZE;
9889 for_each_tracing_cpu(cpu) {
9890 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9893 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9895 /* don't look at user memory in panic mode */
9896 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9898 switch (oops_dump_mode) {
9900 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9903 iter.cpu_file = raw_smp_processor_id();
9908 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9909 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9912 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9914 /* Did function tracer already get disabled? */
9915 if (ftrace_is_dead()) {
9916 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9917 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9921 * We need to stop all tracing on all CPUs to read
9922 * the next buffer. This is a bit expensive, but is
9923 * not done often. We fill all that we can read,
9924 * and then release the locks again.
9927 while (!trace_empty(&iter)) {
9930 printk(KERN_TRACE "---------------------------------\n");
9934 trace_iterator_reset(&iter);
9935 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9937 if (trace_find_next_entry_inc(&iter) != NULL) {
9940 ret = print_trace_line(&iter);
9941 if (ret != TRACE_TYPE_NO_CONSUME)
9942 trace_consume(&iter);
9944 touch_nmi_watchdog();
9946 trace_printk_seq(&iter.seq);
9950 printk(KERN_TRACE " (ftrace buffer empty)\n");
9952 printk(KERN_TRACE "---------------------------------\n");
9955 tr->trace_flags |= old_userobj;
9957 for_each_tracing_cpu(cpu) {
9958 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9960 atomic_dec(&dump_running);
9961 local_irq_restore(flags);
9963 EXPORT_SYMBOL_GPL(ftrace_dump);
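/*
 * Usage sketch (hedged): besides the panic/die notifiers registered in
 * tracer_alloc_buffers() below, ftrace_dump() may also be called
 * directly by kernel code that wants the trace buffers on the console:
 *
 *	ftrace_dump(DUMP_ALL);		// dump the buffers of every CPU
 *	ftrace_dump(DUMP_ORIG);		// dump only the current CPU's buffer
 */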
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;
		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
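/*
 * Caller sketch (illustrative only; my_create_cmd and my_write are
 * hypothetical names, not part of this file): a tracefs write handler
 * can hand each newline-terminated, '#'-comment-stripped command to a
 * parser callback through trace_parse_run_command():
 *
 *	static int my_create_cmd(const char *cmd)
 *	{
 *		pr_info("got command: %s\n", cmd);
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t cnt, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, ubuf, cnt, ppos,
 *					       my_create_cmd);
 *	}
 */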
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}
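/*
 * Usage note (hedged): snapshot_at_boot is expected to be set by the
 * "ftrace_boot_snapshot" kernel command-line parameter, so the snapshot
 * above preserves whatever boot-time tracers and events recorded before
 * the boot-up code frees the main buffer for normal use, e.g.:
 *
 *	ftrace=function ftrace_boot_snapshot
 */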
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
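/*
 * Ordering note (hedged): early_trace_init() is called from start_kernel()
 * very early in boot so the ring buffer exists before any tracepoint can
 * fire, while trace_init() below runs later, once the event infrastructure
 * can be brought up.
 */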
void __init trace_init(void)
{
	trace_event_init();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
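/*
 * Usage note (hedged): the fallback above only runs when no clock was
 * chosen at boot.  Passing the clock explicitly on the kernel command
 * line sets trace_boot_clock and skips the switch, e.g.:
 *
 *	trace_clock=global
 */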
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);