1 // SPDX-License-Identifier: GPL-2.0
3 #ifndef _LINUX_KERNEL_TRACE_H
4 #define _LINUX_KERNEL_TRACE_H
7 #include <linux/atomic.h>
8 #include <linux/sched.h>
9 #include <linux/clocksource.h>
10 #include <linux/ring_buffer.h>
11 #include <linux/mmiotrace.h>
12 #include <linux/tracepoint.h>
13 #include <linux/ftrace.h>
14 #include <linux/trace.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/trace_seq.h>
17 #include <linux/trace_events.h>
18 #include <linux/compiler.h>
19 #include <linux/glob.h>
20 #include <linux/irq_work.h>
21 #include <linux/workqueue.h>
22 #include <linux/ctype.h>
24 #ifdef CONFIG_FTRACE_SYSCALLS
25 #include <asm/unistd.h> /* For NR_SYSCALLS */
26 #include <asm/syscall.h> /* some archs define it here */
30 __TRACE_FIRST_TYPE = 0,
54 #define __field(type, item) type item;
57 #define __field_fn(type, item) type item;
60 #define __field_struct(type, item) __field(type, item)
63 #define __field_desc(type, container, item)
66 #define __field_packed(type, container, item)
69 #define __array(type, item, size) type item[size];
72 #define __array_desc(type, container, item, size)
74 #undef __dynamic_array
75 #define __dynamic_array(type, item) type item[];
78 #define F_STRUCT(args...) args
81 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
82 struct struct_name { \
83 struct trace_entry ent; \
87 #undef FTRACE_ENTRY_DUP
88 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
90 #undef FTRACE_ENTRY_REG
91 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
92 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
94 #undef FTRACE_ENTRY_PACKED
95 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
96 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
98 #include "trace_entries.h"
100 /* Use this for memory failure errors */
101 #define MEM_FAIL(condition, fmt, ...) ({ \
102 static bool __section(".data.once") __warned; \
103 int __ret_warn_once = !!(condition); \
105 if (unlikely(__ret_warn_once && !__warned)) { \
107 pr_err("ERROR: " fmt, ##__VA_ARGS__); \
109 unlikely(__ret_warn_once); \
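/*
 * Illustrative sketch (not part of this header): MEM_FAIL() evaluates to the
 * condition, so allocation failure paths can warn once and still return an
 * error, roughly:
 *
 *	buf->data = alloc_percpu(struct trace_array_cpu);
 *	if (MEM_FAIL(!buf->data, "Failed to allocate per cpu data\n"))
 *		return -ENOMEM;
 */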
113 * syscalls are special and need special handling; this is why
114 * they are not included in trace_entries.h
116 struct syscall_trace_enter {
117 struct trace_entry ent;
119 unsigned long args[];
122 struct syscall_trace_exit {
123 struct trace_entry ent;
128 struct kprobe_trace_entry_head {
129 struct trace_entry ent;
133 struct kretprobe_trace_entry_head {
134 struct trace_entry ent;
136 unsigned long ret_ip;
140 * trace_flag_type is an enumeration that holds different
141 * states when a trace occurs. These are:
142 * IRQS_OFF - interrupts were disabled
143 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
144 * NEED_RESCHED - reschedule is requested
145 * HARDIRQ - inside an interrupt handler
146 * SOFTIRQ - inside a softirq handler
148 enum trace_flag_type {
149 TRACE_FLAG_IRQS_OFF = 0x01,
150 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
151 TRACE_FLAG_NEED_RESCHED = 0x04,
152 TRACE_FLAG_HARDIRQ = 0x08,
153 TRACE_FLAG_SOFTIRQ = 0x10,
154 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
155 TRACE_FLAG_NMI = 0x40,
158 #define TRACE_BUF_SIZE 1024
163 * The CPU trace array - it consists of thousands of trace entries
164 * plus some other descriptor data: (for example which task started
167 struct trace_array_cpu {
169 void *buffer_page; /* ring buffer spare */
171 unsigned long entries;
172 unsigned long saved_latency;
173 unsigned long critical_start;
174 unsigned long critical_end;
175 unsigned long critical_sequence;
177 unsigned long policy;
178 unsigned long rt_priority;
179 unsigned long skipped_entries;
180 u64 preempt_timestamp;
183 char comm[TASK_COMM_LEN];
185 #ifdef CONFIG_FUNCTION_TRACER
186 int ftrace_ignore_pid;
192 struct trace_option_dentry;
194 struct array_buffer {
195 struct trace_array *tr;
196 struct trace_buffer *buffer;
197 struct trace_array_cpu __percpu *data;
202 #define TRACE_FLAGS_MAX_SIZE 32
204 struct trace_options {
205 struct tracer *tracer;
206 struct trace_option_dentry *topts;
209 struct trace_pid_list {
216 TRACE_NO_PIDS = BIT(1),
219 static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
220 struct trace_pid_list *no_pid_list)
222 /* Return true if the pid list in type has pids */
223 return ((type & TRACE_PIDS) && pid_list) ||
224 ((type & TRACE_NO_PIDS) && no_pid_list);
227 static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
228 struct trace_pid_list *no_pid_list)
231 * Turning off what is in @type, return true if the "other"
232 * pid list still has pids in it.
234 return (!(type & TRACE_PIDS) && pid_list) ||
235 (!(type & TRACE_NO_PIDS) && no_pid_list);
238 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
241 * struct cond_snapshot - conditional snapshot data and callback
243 * The cond_snapshot structure encapsulates a callback function and
244 * data associated with the snapshot for a given tracing instance.
246 * When a snapshot is taken conditionally, by invoking
247 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
248 * passed in turn to the cond_snapshot.update() function. That data
249 * can be compared by the update() implementation with the cond_data
250 * contained within the struct cond_snapshot instance associated with
251 * the trace_array. Because the tr->max_lock is held throughout the
252 * update() call, the update() function can directly retrieve the
253 * cond_snapshot and cond_data associated with the per-instance
254 * snapshot of the trace_array.
256 * The cond_snapshot.update() implementation can save data to be
257 * associated with the snapshot if it decides to, and returns 'true'
258 * in that case, or it returns 'false' if the conditional snapshot
259 * shouldn't be taken.
261 * The cond_snapshot instance is created and associated with the
262 * user-defined cond_data by tracing_snapshot_cond_enable().
263 * Likewise, the cond_snapshot instance is destroyed and is no longer
264 * associated with the trace instance by
265 * tracing_snapshot_cond_disable().
267 * The method below is required.
269 * @update: When a conditional snapshot is invoked, the update()
270 * callback function is invoked with the tr->max_lock held. The
271 * update() implementation signals whether or not to actually
272 * take the snapshot, by returning 'true' if so, 'false' if no
273 * snapshot should be taken. Because the max_lock is held for
274 * the duration of update(), the implementation is safe to
275 * directly retrieve and save any implementation data it needs
276 * to in association with the snapshot.
278 struct cond_snapshot {
280 cond_update_fn_t update;
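/*
 * Illustrative sketch (not part of this header) of how the conditional
 * snapshot pieces fit together; "my_update" and "my_cond" are hypothetical:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		return ++cond->hits >= cond->threshold;	// true => take snapshot
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &cond, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &cond);	// calls my_update() under max_lock
 */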
284 * The trace array - an array of per-CPU trace arrays. This is the
285 * highest level data structure that individual tracers deal with.
286 * They have on/off state as well:
289 struct list_head list;
291 struct array_buffer array_buffer;
292 #ifdef CONFIG_TRACER_MAX_TRACE
294 * The max_buffer is used to snapshot the trace when a maximum
295 * latency is reached, or when the user initiates a snapshot.
296 * Some tracers will use this to store a maximum trace while
297 * it continues examining live traces.
299 * The buffers for the max_buffer are set up the same as the array_buffer
300 * When a snapshot is taken, the buffer of the max_buffer is swapped
301 * with the buffer of the array_buffer and the buffers are reset for
302 * the array_buffer so the tracing can continue.
304 struct array_buffer max_buffer;
305 bool allocated_snapshot;
307 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
308 unsigned long max_latency;
309 #ifdef CONFIG_FSNOTIFY
310 struct dentry *d_max_latency;
311 struct work_struct fsnotify_work;
312 struct irq_work fsnotify_irqwork;
315 struct trace_pid_list __rcu *filtered_pids;
316 struct trace_pid_list __rcu *filtered_no_pids;
318 * max_lock is used to protect the swapping of buffers
319 * when taking a max snapshot. The buffers themselves are
320 * protected by per_cpu spinlocks. But the action of the swap
321 * needs its own lock.
323 * This is defined as an arch_spinlock_t in order to help
324 * with performance when lockdep debugging is enabled.
326 * It is also used in other places outside of update_max_tr(),
327 * so it needs to be defined outside of
328 * CONFIG_TRACER_MAX_TRACE.
330 arch_spinlock_t max_lock;
332 #ifdef CONFIG_FTRACE_SYSCALLS
333 int sys_refcount_enter;
334 int sys_refcount_exit;
335 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
336 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
343 unsigned int n_err_log_entries;
344 struct tracer *current_trace;
345 unsigned int trace_flags;
346 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
348 raw_spinlock_t start_lock;
349 struct list_head err_log;
351 struct dentry *options;
352 struct dentry *percpu_dir;
353 struct dentry *event_dir;
354 struct trace_options *topts;
355 struct list_head systems;
356 struct list_head events;
357 struct trace_event_file *trace_marker_file;
358 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
361 #ifdef CONFIG_FUNCTION_TRACER
362 struct ftrace_ops *ops;
363 struct trace_pid_list __rcu *function_pids;
364 struct trace_pid_list __rcu *function_no_pids;
365 #ifdef CONFIG_DYNAMIC_FTRACE
366 /* All of these are protected by the ftrace_lock */
367 struct list_head func_probes;
368 struct list_head mod_trace;
369 struct list_head mod_notrace;
371 /* function tracing enabled */
372 int function_enabled;
374 int time_stamp_abs_ref;
375 struct list_head hist_vars;
376 #ifdef CONFIG_TRACER_SNAPSHOT
377 struct cond_snapshot *cond_snapshot;
382 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
385 extern struct list_head ftrace_trace_arrays;
387 extern struct mutex trace_types_lock;
389 extern int trace_array_get(struct trace_array *tr);
390 extern int tracing_check_open_get_tr(struct trace_array *tr);
391 extern struct trace_array *trace_array_find(const char *instance);
392 extern struct trace_array *trace_array_find_get(const char *instance);
394 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
395 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
397 extern bool trace_clock_in_ns(struct trace_array *tr);
400 * The global tracer (top) should be the first trace array added,
401 * but we check the flag anyway.
403 static inline struct trace_array *top_trace_array(void)
405 struct trace_array *tr;
407 if (list_empty(&ftrace_trace_arrays))
410 tr = list_entry(ftrace_trace_arrays.prev,
412 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
416 #define FTRACE_CMP_TYPE(var, type) \
417 __builtin_types_compatible_p(typeof(var), type *)
420 #define IF_ASSIGN(var, entry, etype, id) \
421 if (FTRACE_CMP_TYPE(var, etype)) { \
422 var = (typeof(var))(entry); \
423 WARN_ON(id != 0 && (entry)->type != id); \
427 /* Will cause compile errors if type is not found. */
428 extern void __ftrace_bad_type(void);
431 * The trace_assign_type is a verifier that the entry type is
432 * the same as the type being assigned. To add new types simply
433 * add a line with the following format:
435 * IF_ASSIGN(var, ent, type, id);
437 * Where "type" is the trace type that includes the trace_entry
438 * as the "ent" item. And "id" is the trace identifier that is
439 * used in the trace_type enum.
441 * If the type can have more than one id, then use zero.
443 #define trace_assign_type(var, ent) \
445 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
446 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
447 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
448 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
449 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
450 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
451 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
452 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
453 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
454 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
456 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
458 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
459 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
461 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
463 __ftrace_bad_type(); \
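/*
 * Illustrative sketch (not from this file): output callbacks narrow the
 * generic entry with trace_assign_type() before touching type-specific
 * fields, e.g.:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps\n", (void *)field->ip);
 */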
467 * An option specific to a tracer. This is a boolean value.
468 * The bit is the bit index that sets its value on the
469 * flags value in struct tracer_flags.
472 const char *name; /* Will appear on the trace_options file */
473 u32 bit; /* Mask assigned in val field in tracer_flags */
477 * The set of specific options for a tracer. Your tracer
478 * has to set the initial value of the flags val.
480 struct tracer_flags {
482 struct tracer_opt *opts;
483 struct tracer *trace;
486 /* Makes it easier to define a tracer opt */
487 #define TRACER_OPT(s, b) .name = #s, .bit = b
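/*
 * Illustrative sketch (not part of this header): tracer options are
 * typically declared as a terminated array of tracer_opt plus a
 * tracer_flags holding the initial bits; names here are hypothetical:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_option, 0x1) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */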
490 struct trace_option_dentry {
491 struct tracer_opt *opt;
492 struct tracer_flags *flags;
493 struct trace_array *tr;
494 struct dentry *entry;
498 * struct tracer - a specific tracer and its callbacks to interact with tracefs
499 * @name: the name chosen to select it on the available_tracers file
500 * @init: called when one switches to this tracer (echo name > current_tracer)
501 * @reset: called when one switches to another tracer
502 * @start: called when tracing is unpaused (echo 1 > tracing_on)
503 * @stop: called when tracing is paused (echo 0 > tracing_on)
504 * @update_thresh: called when tracing_thresh is updated
505 * @open: called when the trace file is opened
506 * @pipe_open: called when the trace_pipe file is opened
507 * @close: called when the trace file is released
508 * @pipe_close: called when the trace_pipe file is released
509 * @read: override the default read callback on trace_pipe
510 * @splice_read: override the default splice_read callback on trace_pipe
511 * @selftest: selftest to run on boot (see trace_selftest.c)
512 * @print_header: override the first lines that describe your columns
513 * @print_line: callback that prints a trace
514 * @set_flag: signals one of your private flags changed (trace_options file)
515 * @flags: your private flags
519 int (*init)(struct trace_array *tr);
520 void (*reset)(struct trace_array *tr);
521 void (*start)(struct trace_array *tr);
522 void (*stop)(struct trace_array *tr);
523 int (*update_thresh)(struct trace_array *tr);
524 void (*open)(struct trace_iterator *iter);
525 void (*pipe_open)(struct trace_iterator *iter);
526 void (*close)(struct trace_iterator *iter);
527 void (*pipe_close)(struct trace_iterator *iter);
528 ssize_t (*read)(struct trace_iterator *iter,
529 struct file *filp, char __user *ubuf,
530 size_t cnt, loff_t *ppos);
531 ssize_t (*splice_read)(struct trace_iterator *iter,
534 struct pipe_inode_info *pipe,
537 #ifdef CONFIG_FTRACE_STARTUP_TEST
538 int (*selftest)(struct tracer *trace,
539 struct trace_array *tr);
541 void (*print_header)(struct seq_file *m);
542 enum print_line_t (*print_line)(struct trace_iterator *iter);
543 /* If you handled the flag setting, return 0 */
544 int (*set_flag)(struct trace_array *tr,
545 u32 old_flags, u32 bit, int set);
546 /* Return 0 if OK with change, else return non-zero */
547 int (*flag_changed)(struct trace_array *tr,
550 struct tracer_flags *flags;
553 bool allow_instances;
554 #ifdef CONFIG_TRACER_MAX_TRACE
557 /* True if tracer cannot be enabled in kernel param */
561 static inline struct ring_buffer_iter *
562 trace_buffer_iter(struct trace_iterator *iter, int cpu)
564 return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
567 int tracer_init(struct tracer *t, struct trace_array *tr);
568 int tracing_is_enabled(void);
569 void tracing_reset_online_cpus(struct array_buffer *buf);
570 void tracing_reset_current(int cpu);
571 void tracing_reset_all_online_cpus(void);
572 int tracing_open_generic(struct inode *inode, struct file *filp);
573 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
574 bool tracing_is_disabled(void);
575 bool tracer_tracing_is_on(struct trace_array *tr);
576 void tracer_tracing_on(struct trace_array *tr);
577 void tracer_tracing_off(struct trace_array *tr);
578 struct dentry *trace_create_file(const char *name,
580 struct dentry *parent,
582 const struct file_operations *fops);
584 int tracing_init_dentry(void);
586 struct ring_buffer_event;
588 struct ring_buffer_event *
589 trace_buffer_lock_reserve(struct trace_buffer *buffer,
592 unsigned int trace_ctx);
594 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
595 struct trace_array_cpu *data);
597 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
598 int *ent_cpu, u64 *ent_ts);
600 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
601 struct ring_buffer_event *event);
603 int trace_empty(struct trace_iterator *iter);
605 void *trace_find_next_entry_inc(struct trace_iterator *iter);
607 void trace_init_global_iter(struct trace_iterator *iter);
609 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
611 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
612 unsigned long trace_total_entries(struct trace_array *tr);
614 void trace_function(struct trace_array *tr,
616 unsigned long parent_ip,
617 unsigned int trace_ctx);
618 void trace_graph_function(struct trace_array *tr,
620 unsigned long parent_ip,
621 unsigned int trace_ctx);
622 void trace_latency_header(struct seq_file *m);
623 void trace_default_header(struct seq_file *m);
624 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
625 int trace_empty(struct trace_iterator *iter);
627 void trace_graph_return(struct ftrace_graph_ret *trace);
628 int trace_graph_entry(struct ftrace_graph_ent *trace);
629 void set_graph_array(struct trace_array *tr);
631 void tracing_start_cmdline_record(void);
632 void tracing_stop_cmdline_record(void);
633 void tracing_start_tgid_record(void);
634 void tracing_stop_tgid_record(void);
636 int register_tracer(struct tracer *type);
637 int is_tracing_stopped(void);
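/*
 * Illustrative sketch (not a tracer in this tree) of the minimal shape a
 * struct tracer usually takes before being handed to register_tracer();
 * "my_tracer" and its init callback are hypothetical:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		tracing_reset_online_cpus(&tr->array_buffer);
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *	};
 *
 *	// typically called from an __init function:
 *	register_tracer(&my_tracer);
 */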
639 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
641 extern cpumask_var_t __read_mostly tracing_buffer_mask;
643 #define for_each_tracing_cpu(cpu) \
644 for_each_cpu(cpu, tracing_buffer_mask)
646 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
648 extern unsigned long tracing_thresh;
654 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
656 bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
657 struct trace_pid_list *filtered_no_pids,
658 struct task_struct *task);
659 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
660 struct task_struct *self,
661 struct task_struct *task);
662 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
663 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
664 int trace_pid_show(struct seq_file *m, void *v);
665 void trace_free_pid_list(struct trace_pid_list *pid_list);
666 int trace_pid_write(struct trace_pid_list *filtered_pids,
667 struct trace_pid_list **new_pid_list,
668 const char __user *ubuf, size_t cnt);
670 #ifdef CONFIG_TRACER_MAX_TRACE
671 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
673 void update_max_tr_single(struct trace_array *tr,
674 struct task_struct *tsk, int cpu);
675 #endif /* CONFIG_TRACER_MAX_TRACE */
677 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
678 defined(CONFIG_FSNOTIFY)
680 void latency_fsnotify(struct trace_array *tr);
684 static inline void latency_fsnotify(struct trace_array *tr) { }
688 #ifdef CONFIG_STACKTRACE
689 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
691 static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
695 #endif /* CONFIG_STACKTRACE */
697 extern u64 ftrace_now(int cpu);
699 extern void trace_find_cmdline(int pid, char comm[]);
700 extern int trace_find_tgid(int pid);
701 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
703 #ifdef CONFIG_DYNAMIC_FTRACE
704 extern unsigned long ftrace_update_tot_cnt;
705 extern unsigned long ftrace_number_of_pages;
706 extern unsigned long ftrace_number_of_groups;
707 void ftrace_init_trace_array(struct trace_array *tr);
709 static inline void ftrace_init_trace_array(struct trace_array *tr) { }
711 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
712 extern int DYN_FTRACE_TEST_NAME(void);
713 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
714 extern int DYN_FTRACE_TEST_NAME2(void);
716 extern bool ring_buffer_expanded;
717 extern bool tracing_selftest_disabled;
719 #ifdef CONFIG_FTRACE_STARTUP_TEST
720 extern void __init disable_tracing_selftest(const char *reason);
722 extern int trace_selftest_startup_function(struct tracer *trace,
723 struct trace_array *tr);
724 extern int trace_selftest_startup_function_graph(struct tracer *trace,
725 struct trace_array *tr);
726 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
727 struct trace_array *tr);
728 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
729 struct trace_array *tr);
730 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
731 struct trace_array *tr);
732 extern int trace_selftest_startup_wakeup(struct tracer *trace,
733 struct trace_array *tr);
734 extern int trace_selftest_startup_nop(struct tracer *trace,
735 struct trace_array *tr);
736 extern int trace_selftest_startup_branch(struct tracer *trace,
737 struct trace_array *tr);
739 * Tracer data references selftest functions that only occur
740 * on boot up. These can be __init functions. Thus, when selftests
741 * are enabled, the tracers need to reference __init functions.
743 #define __tracer_data __refdata
745 static inline void __init disable_tracing_selftest(const char *reason)
748 /* Tracers are seldom changed. Optimize when selftests are disabled. */
749 #define __tracer_data __read_mostly
750 #endif /* CONFIG_FTRACE_STARTUP_TEST */
752 extern void *head_page(struct trace_array_cpu *data);
753 extern unsigned long long ns2usecs(u64 nsec);
755 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
757 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
759 trace_array_vprintk(struct trace_array *tr,
760 unsigned long ip, const char *fmt, va_list args);
761 int trace_array_printk_buf(struct trace_buffer *buffer,
762 unsigned long ip, const char *fmt, ...);
763 void trace_printk_seq(struct trace_seq *s);
764 enum print_line_t print_trace_line(struct trace_iterator *iter);
766 extern char trace_find_mark(unsigned long long duration);
770 struct ftrace_mod_load {
771 struct list_head list;
778 FTRACE_HASH_FL_MOD = (1 << 0),
782 unsigned long size_bits;
783 struct hlist_head *buckets;
789 struct ftrace_func_entry *
790 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
792 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
794 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
797 /* Standard output formatting function used for function return traces */
798 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
801 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
802 #define TRACE_GRAPH_PRINT_CPU 0x2
803 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
804 #define TRACE_GRAPH_PRINT_PROC 0x8
805 #define TRACE_GRAPH_PRINT_DURATION 0x10
806 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
807 #define TRACE_GRAPH_PRINT_REL_TIME 0x40
808 #define TRACE_GRAPH_PRINT_IRQS 0x80
809 #define TRACE_GRAPH_PRINT_TAIL 0x100
810 #define TRACE_GRAPH_SLEEP_TIME 0x200
811 #define TRACE_GRAPH_GRAPH_TIME 0x400
812 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
813 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
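/*
 * For example, a fairly typical set of output flags (a sketch, not a
 * definition from this file) would be:
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
 *		    TRACE_GRAPH_PRINT_DURATION;
 *
 * which is then passed to print_graph_function_flags() and
 * print_graph_headers_flags() declared below.
 */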
815 extern void ftrace_graph_sleep_time_control(bool enable);
817 #ifdef CONFIG_FUNCTION_PROFILER
818 extern void ftrace_graph_graph_time_control(bool enable);
820 static inline void ftrace_graph_graph_time_control(bool enable) { }
823 extern enum print_line_t
824 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
825 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
827 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
828 extern void graph_trace_open(struct trace_iterator *iter);
829 extern void graph_trace_close(struct trace_iterator *iter);
830 extern int __trace_graph_entry(struct trace_array *tr,
831 struct ftrace_graph_ent *trace,
832 unsigned int trace_ctx);
833 extern void __trace_graph_return(struct trace_array *tr,
834 struct ftrace_graph_ret *trace,
835 unsigned int trace_ctx);
837 #ifdef CONFIG_DYNAMIC_FTRACE
838 extern struct ftrace_hash __rcu *ftrace_graph_hash;
839 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
841 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
843 unsigned long addr = trace->func;
845 struct ftrace_hash *hash;
847 preempt_disable_notrace();
850 * Have to open code "rcu_dereference_sched()" because the
851 * function graph tracer can be called when RCU is not
853 * Protected with schedule_on_each_cpu(ftrace_sync)
855 hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
857 if (ftrace_hash_empty(hash)) {
862 if (ftrace_lookup_ip(hash, addr)) {
865 * This needs to be cleared on the return functions
866 * when the depth is zero.
868 trace_recursion_set(TRACE_GRAPH_BIT);
869 trace_recursion_set_depth(trace->depth);
872 * If no irqs are to be traced, but a set_graph_function
873 * is set, and called by an interrupt handler, we still
877 trace_recursion_set(TRACE_IRQ_BIT);
879 trace_recursion_clear(TRACE_IRQ_BIT);
884 preempt_enable_notrace();
888 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
890 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
891 trace->depth == trace_recursion_depth())
892 trace_recursion_clear(TRACE_GRAPH_BIT);
895 static inline int ftrace_graph_notrace_addr(unsigned long addr)
898 struct ftrace_hash *notrace_hash;
900 preempt_disable_notrace();
903 * Have to open code "rcu_dereference_sched()" because the
904 * function graph tracer can be called when RCU is not
906 * Protected with schedule_on_each_cpu(ftrace_sync)
908 notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
911 if (ftrace_lookup_ip(notrace_hash, addr))
914 preempt_enable_notrace();
918 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
923 static inline int ftrace_graph_notrace_addr(unsigned long addr)
927 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
929 #endif /* CONFIG_DYNAMIC_FTRACE */
931 extern unsigned int fgraph_max_depth;
933 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
935 /* trace it when it is nested in an enabled function, or is one itself. */
936 return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
937 ftrace_graph_addr(trace)) ||
938 (trace->depth < 0) ||
939 (fgraph_max_depth && trace->depth >= fgraph_max_depth);
942 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
943 static inline enum print_line_t
944 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
946 return TRACE_TYPE_UNHANDLED;
948 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
950 extern struct list_head ftrace_pids;
952 #ifdef CONFIG_FUNCTION_TRACER
954 #define FTRACE_PID_IGNORE -1
955 #define FTRACE_PID_TRACE -2
957 struct ftrace_func_command {
958 struct list_head list;
960 int (*func)(struct trace_array *tr,
961 struct ftrace_hash *hash,
962 char *func, char *cmd,
963 char *params, int enable);
965 extern bool ftrace_filter_param __initdata;
966 static inline int ftrace_trace_task(struct trace_array *tr)
968 return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
971 extern int ftrace_is_dead(void);
972 int ftrace_create_function_files(struct trace_array *tr,
973 struct dentry *parent);
974 void ftrace_destroy_function_files(struct trace_array *tr);
975 int ftrace_allocate_ftrace_ops(struct trace_array *tr);
976 void ftrace_free_ftrace_ops(struct trace_array *tr);
977 void ftrace_init_global_array_ops(struct trace_array *tr);
978 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
979 void ftrace_reset_array_ops(struct trace_array *tr);
980 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
981 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
982 struct dentry *d_tracer);
983 void ftrace_clear_pids(struct trace_array *tr);
984 int init_function_trace(void);
985 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
987 static inline int ftrace_trace_task(struct trace_array *tr)
991 static inline int ftrace_is_dead(void) { return 0; }
993 ftrace_create_function_files(struct trace_array *tr,
994 struct dentry *parent)
998 static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
1002 static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
1003 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1004 static inline __init void
1005 ftrace_init_global_array_ops(struct trace_array *tr) { }
1006 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1007 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1008 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1009 static inline void ftrace_clear_pids(struct trace_array *tr) { }
1010 static inline int init_function_trace(void) { return 0; }
1011 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1012 /* ftrace_func_t type is not defined, use macro instead of static inline */
1013 #define ftrace_init_array_ops(tr, func) do { } while (0)
1014 #endif /* CONFIG_FUNCTION_TRACER */
1016 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1018 struct ftrace_probe_ops {
1019 void (*func)(unsigned long ip,
1020 unsigned long parent_ip,
1021 struct trace_array *tr,
1022 struct ftrace_probe_ops *ops,
1024 int (*init)(struct ftrace_probe_ops *ops,
1025 struct trace_array *tr,
1026 unsigned long ip, void *init_data,
1028 void (*free)(struct ftrace_probe_ops *ops,
1029 struct trace_array *tr,
1030 unsigned long ip, void *data);
1031 int (*print)(struct seq_file *m,
1033 struct ftrace_probe_ops *ops,
1037 struct ftrace_func_mapper;
1038 typedef int (*ftrace_mapper_func)(void *data);
1040 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1041 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1043 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1044 unsigned long ip, void *data);
1045 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1047 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1048 ftrace_mapper_func free_func);
1051 register_ftrace_function_probe(char *glob, struct trace_array *tr,
1052 struct ftrace_probe_ops *ops, void *data);
1054 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1055 struct ftrace_probe_ops *ops);
1056 extern void clear_ftrace_function_probes(struct trace_array *tr);
1058 int register_ftrace_command(struct ftrace_func_command *cmd);
1059 int unregister_ftrace_command(struct ftrace_func_command *cmd);
1061 void ftrace_create_filter_files(struct ftrace_ops *ops,
1062 struct dentry *parent);
1063 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1065 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1066 int len, int reset);
1067 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1068 int len, int reset);
1070 struct ftrace_func_command;
1072 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1076 static inline __init int unregister_ftrace_command(char *cmd_name)
1080 static inline void clear_ftrace_function_probes(struct trace_array *tr)
1085 * The ops parameter passed in is usually undefined.
1086 * This must be a macro.
1088 #define ftrace_create_filter_files(ops, parent) do { } while (0)
1089 #define ftrace_destroy_filter_files(ops) do { } while (0)
1090 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1092 bool ftrace_event_is_function(struct trace_event_call *call);
1095 * struct trace_parser - serves for reading the user input separated by spaces
1096 * @cont: set if the input is not complete - no final space char was found
1097 * @buffer: holds the parsed user input
1098 * @idx: user input length
1099 * @size: buffer size
1101 struct trace_parser {
1108 static inline bool trace_parser_loaded(struct trace_parser *parser)
1110 return (parser->idx != 0);
1113 static inline bool trace_parser_cont(struct trace_parser *parser)
1115 return parser->cont;
1118 static inline void trace_parser_clear(struct trace_parser *parser)
1120 parser->cont = false;
1124 extern int trace_parser_get_init(struct trace_parser *parser, int size);
1125 extern void trace_parser_put(struct trace_parser *parser);
1126 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1127 size_t cnt, loff_t *ppos);
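/*
 * Illustrative sketch (error handling trimmed) of the usual trace_parser
 * pattern in a file write handler; "process_token" and the PAGE_SIZE buffer
 * size are hypothetical:
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */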
1130 * Only create function graph options if function graph is configured.
1132 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1133 # define FGRAPH_FLAGS \
1134 C(DISPLAY_GRAPH, "display-graph"),
1136 # define FGRAPH_FLAGS
1139 #ifdef CONFIG_BRANCH_TRACER
1140 # define BRANCH_FLAGS \
1141 C(BRANCH, "branch"),
1143 # define BRANCH_FLAGS
1146 #ifdef CONFIG_FUNCTION_TRACER
1147 # define FUNCTION_FLAGS \
1148 C(FUNCTION, "function-trace"), \
1149 C(FUNC_FORK, "function-fork"),
1150 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1152 # define FUNCTION_FLAGS
1153 # define FUNCTION_DEFAULT_FLAGS 0UL
1154 # define TRACE_ITER_FUNC_FORK 0UL
1157 #ifdef CONFIG_STACKTRACE
1158 # define STACK_FLAGS \
1159 C(STACKTRACE, "stacktrace"),
1161 # define STACK_FLAGS
1165 * trace_iterator_flags is an enumeration that defines bit
1166 * positions into trace_flags that control the output.
1168 * NOTE: These bits must match the trace_options array in
1169 * trace.c (this macro guarantees it).
1171 #define TRACE_FLAGS \
1172 C(PRINT_PARENT, "print-parent"), \
1173 C(SYM_OFFSET, "sym-offset"), \
1174 C(SYM_ADDR, "sym-addr"), \
1175 C(VERBOSE, "verbose"), \
1179 C(BLOCK, "block"), \
1180 C(PRINTK, "trace_printk"), \
1181 C(ANNOTATE, "annotate"), \
1182 C(USERSTACKTRACE, "userstacktrace"), \
1183 C(SYM_USEROBJ, "sym-userobj"), \
1184 C(PRINTK_MSGONLY, "printk-msg-only"), \
1185 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1186 C(LATENCY_FMT, "latency-format"), \
1187 C(RECORD_CMD, "record-cmd"), \
1188 C(RECORD_TGID, "record-tgid"), \
1189 C(OVERWRITE, "overwrite"), \
1190 C(STOP_ON_FREE, "disable_on_free"), \
1191 C(IRQ_INFO, "irq-info"), \
1192 C(MARKERS, "markers"), \
1193 C(EVENT_FORK, "event-fork"), \
1194 C(PAUSE_ON_TRACE, "pause-on-trace"), \
1201 * By defining C, we can make TRACE_FLAGS a list of bit names
1202 * that will define the bits for the flag masks.
1205 #define C(a, b) TRACE_ITER_##a##_BIT
1207 enum trace_iterator_bits {
1209 /* Make sure we don't go more than we have bits for */
1214 * By redefining C, we can make TRACE_FLAGS a list of masks that
1215 * use the bits as defined above.
1218 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1220 enum trace_iterator_flags { TRACE_FLAGS };
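/*
 * For example, C(PRINT_PARENT, "print-parent") expands in the bits enum
 * above to TRACE_ITER_PRINT_PARENT_BIT, and here to the mask
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT).
 */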
1223 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1224 * control the output of kernel symbols.
1226 #define TRACE_ITER_SYM_MASK \
1227 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1229 extern struct tracer nop_trace;
1231 #ifdef CONFIG_BRANCH_TRACER
1232 extern int enable_branch_tracing(struct trace_array *tr);
1233 extern void disable_branch_tracing(void);
1234 static inline int trace_branch_enable(struct trace_array *tr)
1236 if (tr->trace_flags & TRACE_ITER_BRANCH)
1237 return enable_branch_tracing(tr);
1240 static inline void trace_branch_disable(void)
1242 /* due to races, always disable */
1243 disable_branch_tracing();
1246 static inline int trace_branch_enable(struct trace_array *tr)
1250 static inline void trace_branch_disable(void)
1253 #endif /* CONFIG_BRANCH_TRACER */
1255 /* set ring buffers to default size if not already done */
1256 int tracing_update_buffers(void);
1258 struct ftrace_event_field {
1259 struct list_head link;
1270 struct event_filter {
1271 struct prog_entry __rcu *prog;
1272 char *filter_string;
1275 struct event_subsystem {
1276 struct list_head list;
1278 struct event_filter *filter;
1282 struct trace_subsystem_dir {
1283 struct list_head list;
1284 struct event_subsystem *subsystem;
1285 struct trace_array *tr;
1286 struct dentry *entry;
1291 extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1292 struct trace_buffer *buffer,
1293 struct ring_buffer_event *event);
1295 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1296 struct trace_buffer *buffer,
1297 struct ring_buffer_event *event,
1298 unsigned int trace_ctx,
1299 struct pt_regs *regs);
1301 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1302 struct trace_buffer *buffer,
1303 struct ring_buffer_event *event,
1304 unsigned int trace_ctx)
1306 trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
1309 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1310 DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1311 void trace_buffered_event_disable(void);
1312 void trace_buffered_event_enable(void);
1315 __trace_event_discard_commit(struct trace_buffer *buffer,
1316 struct ring_buffer_event *event)
1318 if (this_cpu_read(trace_buffered_event) == event) {
1319 /* Simply release the temp buffer */
1320 this_cpu_dec(trace_buffered_event_cnt);
1323 ring_buffer_discard_commit(buffer, event);
1327 * Helper function for event_trigger_unlock_commit{_regs}().
1328 * If there are event triggers attached to this event that require
1329 * filtering against its fields, then they will be called as the
1330 * entry already holds the field information of the current event.
1332 * It also checks if the event should be discarded or not.
1333 * It is to be discarded if the event is soft disabled and the
1334 * event was only recorded to process triggers, or if the event
1335 * filter is active and this event did not match the filters.
1337 * Returns true if the event is discarded, false otherwise.
1340 __event_trigger_test_discard(struct trace_event_file *file,
1341 struct trace_buffer *buffer,
1342 struct ring_buffer_event *event,
1344 enum event_trigger_type *tt)
1346 unsigned long eflags = file->flags;
1348 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1349 *tt = event_triggers_call(file, entry, event);
1351 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1352 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1353 !filter_match_preds(file->filter, entry))) {
1354 __trace_event_discard_commit(buffer, event);
1362 * event_trigger_unlock_commit - handle triggers and finish event commit
1363 * @file: The file pointer associated with the event
1364 * @buffer: The ring buffer that the event is being written to
1365 * @event: The event meta data in the ring buffer
1366 * @entry: The event itself
1367 * @trace_ctx: The tracing context flags.
1369 * This is a helper function to handle triggers that require data
1370 * from the event itself. It also tests the event against filters, and
1371 * checks whether the event is soft disabled and should be discarded.
1374 event_trigger_unlock_commit(struct trace_event_file *file,
1375 struct trace_buffer *buffer,
1376 struct ring_buffer_event *event,
1377 void *entry, unsigned int trace_ctx)
1379 enum event_trigger_type tt = ETT_NONE;
1381 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1382 trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1385 event_triggers_post_call(file, tt);
1389 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1390 * @file: The file pointer associated with the event
1391 * @buffer: The ring buffer that the event is being written to
1392 * @event: The event meta data in the ring buffer
1393 * @entry: The event itself
1394 * @trace_ctx: The tracing context flags.
1396 * This is a helper function to handle triggers that require data
1397 * from the event itself. It also tests the event against filters, and
1398 * checks whether the event is soft disabled and should be discarded.
1400 * Same as event_trigger_unlock_commit() but calls
1401 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1404 event_trigger_unlock_commit_regs(struct trace_event_file *file,
1405 struct trace_buffer *buffer,
1406 struct ring_buffer_event *event,
1407 void *entry, unsigned int trace_ctx,
1408 struct pt_regs *regs)
1410 enum event_trigger_type tt = ETT_NONE;
1412 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1413 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1417 event_triggers_post_call(file, tt);
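/*
 * Illustrative sketch (error handling trimmed; TRACE_MYEVENT and my_field are
 * hypothetical) of the write-side pattern these commit helpers are built for:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_MYEVENT,
 *					  sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->my_field = val;
 *	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
 */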
1420 #define FILTER_PRED_INVALID ((unsigned short)-1)
1421 #define FILTER_PRED_IS_RIGHT (1 << 15)
1422 #define FILTER_PRED_FOLD (1 << 15)
1425 * The max preds is the size of unsigned short with
1426 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1427 * and FOLD flags. The other is reserved.
1429 * 2^14 preds is way more than enough.
1431 #define MAX_FILTER_PRED 16384
1436 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1438 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1450 char pattern[MAX_FILTER_STR_VAL];
1453 regex_match_func match;
1456 struct filter_pred {
1457 filter_pred_fn_t fn;
1460 unsigned short *ops;
1461 struct ftrace_event_field *field;
1467 static inline bool is_string_field(struct ftrace_event_field *field)
1469 return field->filter_type == FILTER_DYN_STRING ||
1470 field->filter_type == FILTER_STATIC_STRING ||
1471 field->filter_type == FILTER_PTR_STRING ||
1472 field->filter_type == FILTER_COMM;
1475 static inline bool is_function_field(struct ftrace_event_field *field)
1477 return field->filter_type == FILTER_TRACE_FN;
1480 extern enum regex_type
1481 filter_parse_regex(char *buff, int len, char **search, int *not);
1482 extern void print_event_filter(struct trace_event_file *file,
1483 struct trace_seq *s);
1484 extern int apply_event_filter(struct trace_event_file *file,
1485 char *filter_string);
1486 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1487 char *filter_string);
1488 extern void print_subsystem_event_filter(struct event_subsystem *system,
1489 struct trace_seq *s);
1490 extern int filter_assign_type(const char *type);
1491 extern int create_event_filter(struct trace_array *tr,
1492 struct trace_event_call *call,
1493 char *filter_str, bool set_str,
1494 struct event_filter **filterp);
1495 extern void free_event_filter(struct event_filter *filter);
1497 struct ftrace_event_field *
1498 trace_find_event_field(struct trace_event_call *call, char *name);
1500 extern void trace_event_enable_cmd_record(bool enable);
1501 extern void trace_event_enable_tgid_record(bool enable);
1503 extern int event_trace_init(void);
1504 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1505 extern int event_trace_del_tracer(struct trace_array *tr);
1506 extern void __trace_early_add_events(struct trace_array *tr);
1508 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1511 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1515 static inline void *event_file_data(struct file *filp)
1517 return READ_ONCE(file_inode(filp)->i_private);
1520 extern struct mutex event_mutex;
1521 extern struct list_head ftrace_events;
1523 extern const struct file_operations event_trigger_fops;
1524 extern const struct file_operations event_hist_fops;
1525 extern const struct file_operations event_hist_debug_fops;
1526 extern const struct file_operations event_inject_fops;
1528 #ifdef CONFIG_HIST_TRIGGERS
1529 extern int register_trigger_hist_cmd(void);
1530 extern int register_trigger_hist_enable_disable_cmds(void);
1532 static inline int register_trigger_hist_cmd(void) { return 0; }
1533 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1536 extern int register_trigger_cmds(void);
1537 extern void clear_event_triggers(struct trace_array *tr);
1539 struct event_trigger_data {
1540 unsigned long count;
1542 struct event_trigger_ops *ops;
1543 struct event_command *cmd_ops;
1544 struct event_filter __rcu *filter;
1549 struct list_head list;
1551 struct list_head named_list;
1552 struct event_trigger_data *named_data;
1556 #define ENABLE_EVENT_STR "enable_event"
1557 #define DISABLE_EVENT_STR "disable_event"
1558 #define ENABLE_HIST_STR "enable_hist"
1559 #define DISABLE_HIST_STR "disable_hist"
1561 struct enable_trigger_data {
1562 struct trace_event_file *file;
1567 extern int event_enable_trigger_print(struct seq_file *m,
1568 struct event_trigger_ops *ops,
1569 struct event_trigger_data *data);
1570 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1571 struct event_trigger_data *data);
1572 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1573 struct trace_event_file *file,
1574 char *glob, char *cmd, char *param);
1575 extern int event_enable_register_trigger(char *glob,
1576 struct event_trigger_ops *ops,
1577 struct event_trigger_data *data,
1578 struct trace_event_file *file);
1579 extern void event_enable_unregister_trigger(char *glob,
1580 struct event_trigger_ops *ops,
1581 struct event_trigger_data *test,
1582 struct trace_event_file *file);
1583 extern void trigger_data_free(struct event_trigger_data *data);
1584 extern int event_trigger_init(struct event_trigger_ops *ops,
1585 struct event_trigger_data *data);
1586 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1587 int trigger_enable);
1588 extern void update_cond_flag(struct trace_event_file *file);
1589 extern int set_trigger_filter(char *filter_str,
1590 struct event_trigger_data *trigger_data,
1591 struct trace_event_file *file);
1592 extern struct event_trigger_data *find_named_trigger(const char *name);
1593 extern bool is_named_trigger(struct event_trigger_data *test);
1594 extern int save_named_trigger(const char *name,
1595 struct event_trigger_data *data);
1596 extern void del_named_trigger(struct event_trigger_data *data);
1597 extern void pause_named_trigger(struct event_trigger_data *data);
1598 extern void unpause_named_trigger(struct event_trigger_data *data);
1599 extern void set_named_trigger_data(struct event_trigger_data *data,
1600 struct event_trigger_data *named_data);
1601 extern struct event_trigger_data *
1602 get_named_trigger_data(struct event_trigger_data *data);
1603 extern int register_event_command(struct event_command *cmd);
1604 extern int unregister_event_command(struct event_command *cmd);
1605 extern int register_trigger_hist_enable_disable_cmds(void);
1608 * struct event_trigger_ops - callbacks for trace event triggers
1610 * The methods in this structure provide per-event trigger hooks for
1611 * various trigger operations.
1613 * All the methods below, except for @init() and @free(), must be
1616 * @func: The trigger 'probe' function called when the triggering
1617 * event occurs. The data passed into this callback is the data
1618 * that was supplied to the event_command @reg() function that
1619 * registered the trigger (see struct event_command) along with
1620 * the trace record, rec.
1622 * @init: An optional initialization function called for the trigger
1623 * when the trigger is registered (via the event_command reg()
1624 * function). This can be used to perform per-trigger
1625 * initialization such as incrementing a per-trigger reference
1626 * count, for instance. This is usually implemented by the
1627 * generic utility function @event_trigger_init() (see
1628 * trace_events_trigger.c).
1630 * @free: An optional de-initialization function called for the
1631 * trigger when the trigger is unregistered (via the
1632 * event_command @reg() function). This can be used to perform
1633 * per-trigger de-initialization such as decrementing a
1634 * per-trigger reference count and freeing corresponding trigger
1635 * data, for instance. This is usually implemented by the
1636 * generic utility function @event_trigger_free() (see
1637 * trace_events_trigger.c).
1639 * @print: The callback function invoked to have the trigger print
1640 * itself. This is usually implemented by a wrapper function
1641 * that calls the generic utility function @event_trigger_print()
1642 * (see trace_events_trigger.c).
1644 struct event_trigger_ops {
1645 void (*func)(struct event_trigger_data *data,
1647 struct ring_buffer_event *rbe);
1648 int (*init)(struct event_trigger_ops *ops,
1649 struct event_trigger_data *data);
1650 void (*free)(struct event_trigger_ops *ops,
1651 struct event_trigger_data *data);
1652 int (*print)(struct seq_file *m,
1653 struct event_trigger_ops *ops,
1654 struct event_trigger_data *data);
1658 * struct event_command - callbacks and data members for event commands
1660 * Event commands are invoked by users by writing the command name
1661 * into the 'trigger' file associated with a trace event. The
1662 * parameters associated with a specific invocation of an event
1663 * command are used to create an event trigger instance, which is
1664 * added to the list of trigger instances associated with that trace
1665 * event. When the event is hit, the set of triggers associated with
1666 * that event is invoked.
1668 * The data members in this structure provide per-event command data
1669 * for various event commands.
1671 * All the data members below, except for @post_trigger, must be set
1672 * for each event command.
1674 * @name: The unique name that identifies the event command. This is
1675 * the name used when setting triggers via trigger files.
1677 * @trigger_type: A unique id that identifies the event command
1678 * 'type'. This value has two purposes, the first to ensure that
1679 * only one trigger of the same type can be set at a given time
1680 * for a particular event, e.g. it doesn't make sense to have both
1681 * a traceon and traceoff trigger attached to a single event at
1682 * the same time, so traceon and traceoff have the same type
1683 * though they have different names. The @trigger_type value is
1684 * also used as a bit value for deferring the actual trigger
1685 * action until after the current event is finished. Some
1686 * commands need to do this if they themselves log to the trace
1687 * buffer (see the @post_trigger() member below). @trigger_type
1688 * values are defined by adding new values to the trigger_type
1689 * enum in include/linux/trace_events.h.
1691 * @flags: See the enum event_command_flags below.
1693 * All the methods below, except for @set_filter() and @unreg_all(),
1694 * must be implemented.
1696 * @func: The callback function responsible for parsing and
1697 * registering the trigger written to the 'trigger' file by the
1698 * user. It allocates the trigger instance and registers it with
1699 * the appropriate trace event. It makes use of the other
1700 * event_command callback functions to orchestrate this, and is
1701 * usually implemented by the generic utility function
1702 * @event_trigger_callback() (see trace_events_trigger.c).
1704 * @reg: Adds the trigger to the list of triggers associated with the
1705 * event, and enables the event trigger itself, after
1706 * initializing it (via the event_trigger_ops @init() function).
1707 * This is also where commands can use the @trigger_type value to
1708 * make the decision as to whether or not multiple instances of
1709 * the trigger should be allowed. This is usually implemented by
1710 * the generic utility function @register_trigger() (see
1711 * trace_events_trigger.c).
1713 * @unreg: Removes the trigger from the list of triggers associated
1714 * with the event, and disables the event trigger itself, after
1715 * initializing it (via the event_trigger_ops @free() function).
1716 * This is usually implemented by the generic utility function
1717 * @unregister_trigger() (see trace_events_trigger.c).
1719 * @unreg_all: An optional function called to remove all the triggers
1720 * from the list of triggers associated with the event. Called
1721 * when a trigger file is opened in truncate mode.
1723 * @set_filter: An optional function called to parse and set a filter
1724 * for the trigger. If no @set_filter() method is set for the
1725 * event command, filters set by the user for the command will be
1726 * ignored. This is usually implemented by the generic utility
1727 * function @set_trigger_filter() (see trace_events_trigger.c).
1729 * @get_trigger_ops: The callback function invoked to retrieve the
1730 * event_trigger_ops implementation associated with the command.
1732 struct event_command {
1733 struct list_head list;
1735 enum event_trigger_type trigger_type;
1737 int (*func)(struct event_command *cmd_ops,
1738 struct trace_event_file *file,
1739 char *glob, char *cmd, char *params);
1740 int (*reg)(char *glob,
1741 struct event_trigger_ops *ops,
1742 struct event_trigger_data *data,
1743 struct trace_event_file *file);
1744 void (*unreg)(char *glob,
1745 struct event_trigger_ops *ops,
1746 struct event_trigger_data *data,
1747 struct trace_event_file *file);
1748 void (*unreg_all)(struct trace_event_file *file);
1749 int (*set_filter)(char *filter_str,
1750 struct event_trigger_data *data,
1751 struct trace_event_file *file);
1752 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1756 * enum event_command_flags - flags for struct event_command
1758 * @POST_TRIGGER: A flag that says whether or not this command needs
1759 * to have its action delayed until after the current event has
1760 * been closed. Some triggers need to avoid being invoked while
1761 * an event is currently in the process of being logged, since
1762 * the trigger may itself log data into the trace buffer. Thus
1763 * we make sure the current event is committed before invoking
1764 * those triggers. To do that, the trigger invocation is split
1765 * in two - the first part checks the filter using the current
1766 * trace record; if a command has the @post_trigger flag set, it
1767 * sets a bit for itself in the return value, otherwise it
1768 * directly invokes the trigger. Once all commands have been
1769 * either invoked or set their return flag, the current record is
1770 * either committed or discarded. At that point, if any commands
1771 * have deferred their triggers, those commands are finally
1772 * invoked following the close of the current event. In other
1773 * words, if the event_trigger_ops @func() probe implementation
1774 * itself logs to the trace buffer, this flag should be set,
1775 * otherwise it can be left unspecified.
1777 * @NEEDS_REC: A flag that says whether or not this command needs
1778 * access to the trace record in order to perform its function,
1779 * regardless of whether or not it has a filter associated with
1780 * it (filters make a trigger require access to the trace record
1781 * but are not always present).
1783 enum event_command_flags {
1784 EVENT_CMD_FL_POST_TRIGGER = 1,
1785 EVENT_CMD_FL_NEEDS_REC = 2,
1788 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1790 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1793 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1795 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
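/*
 * Illustrative sketch (hypothetical command; see trace_events_trigger.c for
 * the real ones) of how an event_command is typically defined and registered
 * from the generic helpers named in the documentation above:
 *
 *	static struct event_command trigger_mycmd_cmd = {
 *		.name			= "mycmd",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.flags			= EVENT_CMD_FL_POST_TRIGGER,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= mycmd_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	register_event_command(&trigger_mycmd_cmd);
 */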
1798 extern int trace_event_enable_disable(struct trace_event_file *file,
1799 int enable, int soft_disable);
1800 extern int tracing_alloc_snapshot(void);
1801 extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1802 extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1804 extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1805 extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1807 extern const char *__start___trace_bprintk_fmt[];
1808 extern const char *__stop___trace_bprintk_fmt[];
1810 extern const char *__start___tracepoint_str[];
1811 extern const char *__stop___tracepoint_str[];
1813 void trace_printk_control(bool enabled);
1814 void trace_printk_start_comm(void);
1815 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1816 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1818 /* Used from boot time tracer */
1819 extern int trace_set_options(struct trace_array *tr, char *option);
1820 extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1821 extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1822 unsigned long size, int cpu_id);
1823 extern int tracing_set_cpumask(struct trace_array *tr,
1824 cpumask_var_t tracing_cpumask_new);
1827 #define MAX_EVENT_NAME_LEN 64
1829 extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1830 extern ssize_t trace_parse_run_command(struct file *file,
1831 const char __user *buffer, size_t count, loff_t *ppos,
1832 int (*createfn)(int, char**));
1834 extern unsigned int err_pos(char *cmd, const char *str);
1835 extern void tracing_log_err(struct trace_array *tr,
1836 const char *loc, const char *cmd,
1837 const char **errs, u8 type, u8 pos);
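/*
 * Illustrative sketch (hypothetical error table, location string and command
 * buffer) of how command parsers report errors through the helpers above:
 *
 *	static const char *my_errs[] = { "Invalid argument", };
 *
 *	tracing_log_err(tr, "trigger", cmd, my_errs, 0,
 *			err_pos(cmd, bad_token));
 */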
1840 * Normal trace_printk() and friends allocate special buffers
1841 * to do the manipulation, as well as save the print formats
1842 * into sections to display. But the trace infrastructure wants
1843 * to use these without the added overhead at the price of being
1844 * a bit slower (used mainly for warnings, where we don't care
1845 * about performance). The internal_trace_puts() is for such
1848 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1851 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
1852 extern struct trace_event_call \
1853 __aligned(4) event_##call;
1854 #undef FTRACE_ENTRY_DUP
1855 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1856 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1857 #undef FTRACE_ENTRY_PACKED
1858 #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1859 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1861 #include "trace_entries.h"
1863 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1864 int perf_ftrace_event_register(struct trace_event_call *call,
1865 enum trace_reg type, void *data);
1867 #define perf_ftrace_event_register NULL
1870 #ifdef CONFIG_FTRACE_SYSCALLS
1871 void init_ftrace_syscalls(void);
1872 const char *get_syscall_name(int syscall);
1874 static inline void init_ftrace_syscalls(void) { }
1875 static inline const char *get_syscall_name(int syscall)
1881 #ifdef CONFIG_EVENT_TRACING
1882 void trace_event_init(void);
1883 void trace_event_eval_update(struct trace_eval_map **map, int len);
1884 /* Used from boot time tracer */
1885 extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
1886 extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1888 static inline void __init trace_event_init(void) { }
1889 static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1892 #ifdef CONFIG_TRACER_SNAPSHOT
1893 void tracing_snapshot_instance(struct trace_array *tr);
1894 int tracing_alloc_snapshot_instance(struct trace_array *tr);
1896 static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1897 static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1903 #ifdef CONFIG_PREEMPT_TRACER
1904 void tracer_preempt_on(unsigned long a0, unsigned long a1);
1905 void tracer_preempt_off(unsigned long a0, unsigned long a1);
1907 static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1908 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1910 #ifdef CONFIG_IRQSOFF_TRACER
1911 void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1912 void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1914 static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1915 static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1918 extern struct trace_iterator *tracepoint_print_iter;
1921 * Reset the state of the trace_iterator so that it can read consumed data.
1922 * Normally, the trace_iterator is used for reading the data when it is not
1923 * consumed, and must retain state.
1925 static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1927 const size_t offset = offsetof(struct trace_iterator, seq);
1930 * Keep gcc from complaining about overwriting more than just one
1931 * member in the structure.
1933 memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
1938 /* Check the name is good for event/group/fields */
1939 static inline bool is_good_name(const char *name)
1941 if (!isalpha(*name) && *name != '_')
1943 while (*++name != '\0') {
1944 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
1950 #endif /* _LINUX_KERNEL_TRACE_H */