1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Ftrace header. For implementation details beyond the random comments
4 * scattered below, see: Documentation/trace/ftrace-design.rst
7 #ifndef _LINUX_FTRACE_H
8 #define _LINUX_FTRACE_H
10 #include <linux/trace_recursion.h>
11 #include <linux/trace_clock.h>
12 #include <linux/kallsyms.h>
13 #include <linux/linkage.h>
14 #include <linux/bitops.h>
15 #include <linux/ptrace.h>
16 #include <linux/ktime.h>
17 #include <linux/sched.h>
18 #include <linux/types.h>
19 #include <linux/init.h>
22 #include <asm/ftrace.h>
25 * If the arch supports passing the variable contents of
26 * function_trace_op as the third parameter back from the
27 * mcount call, then the arch should define this as 1.
29 #ifndef ARCH_SUPPORTS_FTRACE_OPS
30 #define ARCH_SUPPORTS_FTRACE_OPS 0
34 * If the arch's mcount caller does not support all of ftrace's
35 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
38 #if !ARCH_SUPPORTS_FTRACE_OPS
39 # define FTRACE_FORCE_LIST_FUNC 1
41 # define FTRACE_FORCE_LIST_FUNC 0
44 /* Main tracing buffer and events set up */
46 void trace_init(void);
47 void early_trace_init(void);
49 static inline void trace_init(void) { }
50 static inline void early_trace_init(void) { }
55 struct ftrace_direct_func;
57 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
58 defined(CONFIG_DYNAMIC_FTRACE)
60 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
61 unsigned long *off, char **modname, char *sym);
63 static inline const char *
64 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
65 unsigned long *off, char **modname, char *sym)
71 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
72 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
73 char *type, char *name,
74 char *module_name, int *exported);
76 static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
77 char *type, char *name,
78 char *module_name, int *exported)
84 #ifdef CONFIG_FUNCTION_TRACER
86 extern int ftrace_enabled;
88 ftrace_enable_sysctl(struct ctl_table *table, int write,
89 void *buffer, size_t *lenp, loff_t *ppos);
93 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
98 #define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * to allow setting the instruction pointer from the ftrace_regs
 * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and the architecture supports
 * live kernel patching.
106 #define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
107 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
109 static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
114 return arch_ftrace_get_regs(fregs);
117 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
118 struct ftrace_ops *op, struct ftrace_regs *fregs);
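/*
 * For example, a callback registered with FTRACE_OPS_FL_SAVE_REGS can recover
 * the full pt_regs from its ftrace_regs argument (a minimal sketch with
 * hypothetical names):
 *
 *	static void my_cb(unsigned long ip, unsigned long parent_ip,
 *			  struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (regs)	// NULL when the arch did not save full regs
 *			pr_debug("traced ip: %pS\n",
 *				 (void *)instruction_pointer(regs));
 *	}
 */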
120 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
123 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
124 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are attribute flags: they can be set only before
 * registering the ftrace_ops and cannot be modified while it is registered.
 * Changing these attribute flags after registering the ftrace_ops will
 * cause unexpected results.
131 * ENABLED - set/unset when ftrace_ops is registered/unregistered
132 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
133 * allocated ftrace_ops which need special care
134 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
135 * and passed to the callback. If this flag is set, but the
136 * architecture does not support passing regs
137 * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 * ftrace_ops will fail to register, unless the next flag is set.
140 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
141 * handler can handle an arch that does not save regs
142 * (the handler tests if regs == NULL), then it can set
143 * this flag instead. It will not fail registering the ftrace_ops
 * but the regs field will be NULL if the arch does not support
145 * passing regs to the handler.
146 * Note, if this flag is set, the SAVE_REGS flag will automatically
147 * get set upon registering the ftrace_ops, if the arch supports it.
148 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 * that the callback needs recursion protection. If it does
150 * not set this, then the ftrace infrastructure will assume
151 * that the callback can handle recursion on its own.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on it, the ops is initialized)
155 * DELETED - The ops are being deleted, do not let them be registered again.
156 * ADDING - The ops is in the process of being added.
157 * REMOVING - The ops is in the process of being removed.
158 * MODIFYING - The ops is in the process of changing its filter functions.
159 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
160 * The arch specific code sets this flag when it allocated a
161 * trampoline. This lets the arch know that it can update the
162 * trampoline in case the callback function changes.
163 * The ftrace_ops trampoline can be set by the ftrace users, and
164 * in such cases the arch must not modify it. Only the arch ftrace
165 * core code should set this flag.
166 * IPMODIFY - The ops can modify the IP register. This can only be set with
167 * SAVE_REGS. If another ops with this flag set is already registered
168 * for any of the functions that this ops will be registered for, then
169 * this ops will fail to register or set_filter_ip.
170 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
171 * RCU - Set when the ops can only be called when RCU is watching.
172 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by ftrace_enabled.
175 * DIRECT - Used by the direct ftrace_ops helper for direct functions
176 * (internal ftrace only, should not be used by others)
179 FTRACE_OPS_FL_ENABLED = BIT(0),
180 FTRACE_OPS_FL_DYNAMIC = BIT(1),
181 FTRACE_OPS_FL_SAVE_REGS = BIT(2),
182 FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
183 FTRACE_OPS_FL_RECURSION = BIT(4),
184 FTRACE_OPS_FL_STUB = BIT(5),
185 FTRACE_OPS_FL_INITIALIZED = BIT(6),
186 FTRACE_OPS_FL_DELETED = BIT(7),
187 FTRACE_OPS_FL_ADDING = BIT(8),
188 FTRACE_OPS_FL_REMOVING = BIT(9),
189 FTRACE_OPS_FL_MODIFYING = BIT(10),
190 FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
191 FTRACE_OPS_FL_IPMODIFY = BIT(12),
192 FTRACE_OPS_FL_PID = BIT(13),
193 FTRACE_OPS_FL_RCU = BIT(14),
194 FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
195 FTRACE_OPS_FL_PERMANENT = BIT(16),
196 FTRACE_OPS_FL_DIRECT = BIT(17),
199 #ifdef CONFIG_DYNAMIC_FTRACE
200 /* The hash used to know what functions callbacks trace */
201 struct ftrace_ops_hash {
202 struct ftrace_hash __rcu *notrace_hash;
203 struct ftrace_hash __rcu *filter_hash;
204 struct mutex regex_lock;
207 void ftrace_free_init_mem(void);
208 void ftrace_free_mem(struct module *mod, void *start, void *end);
210 static inline void ftrace_free_init_mem(void) { }
211 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If an ftrace_ops is allocated dynamically and is not
 * part of kernel core data, unregistering it will schedule work on every CPU
 * to make sure that there are no more users. Depending on system load,
 * this may take a bit of time.
 *
 * Any private data attached must likewise not be freed while still in use. If
 * private data is added to an ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
227 struct ftrace_ops __rcu *next;
230 ftrace_func_t saved_func;
231 #ifdef CONFIG_DYNAMIC_FTRACE
232 struct ftrace_ops_hash local_hash;
233 struct ftrace_ops_hash *func_hash;
234 struct ftrace_ops_hash old_hash;
235 unsigned long trampoline;
236 unsigned long trampoline_size;
237 struct list_head list;
241 extern struct ftrace_ops __rcu *ftrace_ops_list;
242 extern struct ftrace_ops ftrace_list_end;
245 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
246 * can use rcu_dereference_raw_check() is that elements removed from this list
247 * are simply leaked, so there is no need to interact with a grace-period
248 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
249 * concurrent insertions into the ftrace_ops_list.
251 * Silly Alpha and silly pointer-speculation compiler optimizations!
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);			\
	do
258 * Optimized for just a single item in the list (as that is the normal case).
260 #define while_for_each_ftrace_op(op) \
261 while (likely(op = rcu_dereference_raw_check((op)->next)) && \
262 unlikely((op) != &ftrace_list_end))
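/*
 * For example, the core walks the registered ops like this (sketch):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		// inspect op, e.g. op->flags
 *	} while_for_each_ftrace_op(op);
 */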
265 * Type of the current tracing.
267 enum ftrace_tracing_type_t {
268 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
269 FTRACE_TYPE_RETURN, /* Hook the return of the function */
272 /* Current tracing type, default is FTRACE_TYPE_ENTER */
273 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
282 int register_ftrace_function(struct ftrace_ops *ops);
283 int unregister_ftrace_function(struct ftrace_ops *ops);
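/*
 * For example, a minimal registration sketch (hypothetical names, error
 * handling trimmed); see the ftrace_get_regs() sketch above for what the
 * callback can do with fregs:
 *
 *	static void my_trace_cb(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// runs on every hit of the filtered functions
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_cb,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *		return register_ftrace_function(&my_ops);
 *	}
 */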
285 extern void ftrace_stub(unsigned long a0, unsigned long a1,
286 struct ftrace_ops *op, struct ftrace_regs *fregs);
288 #else /* !CONFIG_FUNCTION_TRACER */
290 * (un)register_ftrace_function must be a macro since the ops parameter
291 * must not be evaluated.
293 #define register_ftrace_function(ops) ({ 0; })
294 #define unregister_ftrace_function(ops) ({ 0; })
295 static inline void ftrace_kill(void) { }
296 static inline void ftrace_free_init_mem(void) { }
297 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
298 #endif /* CONFIG_FUNCTION_TRACER */
300 struct ftrace_func_entry {
301 struct hlist_node hlist;
303 unsigned long direct; /* for direct lookup only */
308 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
309 extern int ftrace_direct_func_count;
310 int register_ftrace_direct(unsigned long ip, unsigned long addr);
311 int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
312 int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
313 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
314 int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
315 struct dyn_ftrace *rec,
316 unsigned long old_addr,
317 unsigned long new_addr);
318 unsigned long ftrace_find_rec_direct(unsigned long ip);
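/*
 * For example, a minimal sketch in the style of samples/ftrace/ftrace-direct.c,
 * where my_tramp is a hypothetical arch-level assembly trampoline that saves
 * and restores the argument registers around a C handler:
 *
 *	register_ftrace_direct((unsigned long)wake_up_process,
 *			       (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)wake_up_process,
 *				 (unsigned long)my_tramp);
 */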
320 # define ftrace_direct_func_count 0
321 static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
325 static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
329 static inline int modify_ftrace_direct(unsigned long ip,
330 unsigned long old_addr, unsigned long new_addr)
334 static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
338 static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
339 struct dyn_ftrace *rec,
340 unsigned long old_addr,
341 unsigned long new_addr)
345 static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
349 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
351 #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
353 * This must be implemented by the architecture.
354 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
356 * direct call), can inform the architecture's trampoline that this
357 * routine has a direct caller, and what the caller is.
 * For example, on x86 it returns the direct caller
360 * callback function via the regs->orig_ax parameter.
361 * Then in the ftrace trampoline, if this is set, it makes
362 * the return from the trampoline jump to the direct caller
363 * instead of going back to the function it just traced.
365 static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
366 unsigned long addr) { }
367 #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
369 #ifdef CONFIG_STACK_TRACER
371 extern int stack_tracer_enabled;
373 int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
374 size_t *lenp, loff_t *ppos);
376 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
377 DECLARE_PER_CPU(int, disable_stack_tracer);
380 * stack_tracer_disable - temporarily disable the stack tracer
 * There are a few locations (namely in RCU) where stack tracing
383 * cannot be executed. This function is used to disable stack
384 * tracing during those critical sections.
386 * This function must be called with preemption or interrupts
387 * disabled and stack_tracer_enable() must be called shortly after
388 * while preemption or interrupts are still disabled.
390 static inline void stack_tracer_disable(void)
	/* Preemption or interrupts must be disabled */
393 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
394 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
395 this_cpu_inc(disable_stack_tracer);
399 * stack_tracer_enable - re-enable the stack tracer
401 * After stack_tracer_disable() is called, stack_tracer_enable()
402 * must be called shortly afterward.
404 static inline void stack_tracer_enable(void)
406 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
407 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
408 this_cpu_dec(disable_stack_tracer);
411 static inline void stack_tracer_disable(void) { }
412 static inline void stack_tracer_enable(void) { }
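/*
 * A minimal usage sketch: the caller must already have preemption (or
 * interrupts) disabled around the pair:
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	...	// code that must not be sampled by the stack tracer
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */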
415 #ifdef CONFIG_DYNAMIC_FTRACE
417 int ftrace_arch_code_modify_prepare(void);
418 int ftrace_arch_code_modify_post_process(void);
420 enum ftrace_bug_type {
427 extern enum ftrace_bug_type ftrace_bug_type;
430 * Archs can set this to point to a variable that holds the value that was
431 * expected at the call site before calling ftrace_bug().
433 extern const void *ftrace_expected;
435 void ftrace_bug(int err, struct dyn_ftrace *rec);
439 extern int ftrace_text_reserved(const void *start, const void *end);
441 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
443 bool is_ftrace_trampoline(unsigned long addr);
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0' to FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered to trace the function
 * that the dyn_ftrace descriptor represents.
451 * The second part is a mask:
452 * ENABLED - the function is being traced
453 * REGS - the record wants the function to save regs
454 * REGS_EN - the function is set up to save regs.
455 * IPMODIFY - the record allows for the IP address to be changed.
456 * DISABLED - the record is not ready to be touched yet
457 * DIRECT - there is a direct function to call
459 * When a new ftrace_ops is registered and wants a function to save
460 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
462 * starts saving regs it will do so until all ftrace_ops are removed
463 * from tracing that function.
466 FTRACE_FL_ENABLED = (1UL << 31),
467 FTRACE_FL_REGS = (1UL << 30),
468 FTRACE_FL_REGS_EN = (1UL << 29),
469 FTRACE_FL_TRAMP = (1UL << 28),
470 FTRACE_FL_TRAMP_EN = (1UL << 27),
471 FTRACE_FL_IPMODIFY = (1UL << 26),
472 FTRACE_FL_DISABLED = (1UL << 25),
473 FTRACE_FL_DIRECT = (1UL << 24),
474 FTRACE_FL_DIRECT_EN = (1UL << 23),
477 #define FTRACE_REF_MAX_SHIFT 23
478 #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
480 #define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
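/*
 * For example (sketch): the low FTRACE_REF_MAX_SHIFT bits of rec->flags count
 * the ftrace_ops attached to this call site and the upper bits hold its state:
 *
 *	if (ftrace_rec_count(rec) && (rec->flags & FTRACE_FL_ENABLED))
 *		;	// at least one ops is attached and the site is patched
 */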
483 unsigned long ip; /* address of mcount call-site */
485 struct dyn_arch_ftrace arch;
488 int ftrace_force_update(void);
489 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
490 int remove, int reset);
491 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
493 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
495 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
496 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
497 void ftrace_free_filter(struct ftrace_ops *ops);
498 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
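/*
 * For example, a sketch of setting up filtering on a (hypothetical) my_ops
 * before it is registered:
 *
 *	ftrace_set_filter(&my_ops, "kmalloc", strlen("kmalloc"), 1);	// reset, then match
 *	ftrace_set_notrace(&my_ops, "kfree", strlen("kfree"), 0);	// never trace kfree
 *	ftrace_set_filter_ip(&my_ops, target_ip, 0, 0);			// add one call site
 */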
501 FTRACE_UPDATE_CALLS = (1 << 0),
502 FTRACE_DISABLE_CALLS = (1 << 1),
503 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
504 FTRACE_START_FUNC_RET = (1 << 3),
505 FTRACE_STOP_FUNC_RET = (1 << 4),
506 FTRACE_MAY_SLEEP = (1 << 5),
510 * The FTRACE_UPDATE_* enum is used to pass information back
511 * from the ftrace_update_record() and ftrace_test_record()
512 * functions. These are called by the code update routines
513 * to find out what is to be done for a given function.
515 * IGNORE - The function is already what we want it to be
516 * MAKE_CALL - Start tracing the function
 * MODIFY_CALL - Change what the call site calls (e.g. start or stop saving regs)
518 * MAKE_NOP - Stop tracing the function
521 FTRACE_UPDATE_IGNORE,
522 FTRACE_UPDATE_MAKE_CALL,
523 FTRACE_UPDATE_MODIFY_CALL,
524 FTRACE_UPDATE_MAKE_NOP,
528 FTRACE_ITER_FILTER = (1 << 0),
529 FTRACE_ITER_NOTRACE = (1 << 1),
530 FTRACE_ITER_PRINTALL = (1 << 2),
531 FTRACE_ITER_DO_PROBES = (1 << 3),
532 FTRACE_ITER_PROBE = (1 << 4),
533 FTRACE_ITER_MOD = (1 << 5),
534 FTRACE_ITER_ENABLED = (1 << 6),
537 void arch_ftrace_update_code(int command);
538 void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
539 void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
540 void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
542 struct ftrace_rec_iter;
544 struct ftrace_rec_iter *ftrace_rec_iter_start(void);
545 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
546 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
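/*
 * For example, arch code can walk every call-site record while patching
 * (sketch):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch the site at rec->ip
 *	}
 */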
554 int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
555 int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
556 void ftrace_run_stop_machine(int command);
557 unsigned long ftrace_location(unsigned long ip);
558 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
559 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
560 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
562 extern ftrace_func_t ftrace_trace_function;
564 int ftrace_regex_open(struct ftrace_ops *ops, int flag,
565 struct inode *inode, struct file *file);
566 ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
567 size_t cnt, loff_t *ppos);
568 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
569 size_t cnt, loff_t *ppos);
570 int ftrace_regex_release(struct inode *inode, struct file *file);
573 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
575 /* defined in arch */
576 extern int ftrace_ip_converted(unsigned long ip);
577 extern int ftrace_dyn_arch_init(void);
578 extern void ftrace_replace_code(int enable);
579 extern int ftrace_update_ftrace_func(ftrace_func_t func);
580 extern void ftrace_caller(void);
581 extern void ftrace_regs_caller(void);
582 extern void ftrace_call(void);
583 extern void ftrace_regs_call(void);
584 extern void mcount_call(void);
586 void ftrace_modify_all_code(int command);
589 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
592 #ifndef FTRACE_GRAPH_ADDR
593 #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
596 #ifndef FTRACE_REGS_ADDR
597 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
598 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
600 # define FTRACE_REGS_ADDR FTRACE_ADDR
605 * If an arch would like functions that are only traced
606 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
608 * to be that address to jump to.
610 #ifndef FTRACE_GRAPH_TRAMP_ADDR
611 #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
614 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
615 extern void ftrace_graph_caller(void);
616 extern int ftrace_enable_ftrace_graph_caller(void);
617 extern int ftrace_disable_ftrace_graph_caller(void);
619 static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
620 static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
624 * ftrace_make_nop - convert code into nop
625 * @mod: module structure if called by module load initialization
626 * @rec: the call site record (e.g. mcount/fentry)
627 * @addr: the address that the call site should be calling
629 * This is a very sensitive operation and great care needs
630 * to be taken by the arch. The operation should carefully
631 * read the location, check to see if what is read is indeed
632 * what we expect it to be, and then on success of the compare,
633 * it should write to the location.
635 * The code segment at @rec->ip should be a caller to @addr
639 * -EFAULT on error reading the location
640 * -EINVAL on a failed compare of the contents
641 * -EPERM on error writing to the location
642 * Any other value will be considered a failure.
644 extern int ftrace_make_nop(struct module *mod,
645 struct dyn_ftrace *rec, unsigned long addr);
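/*
 * A rough sketch of the read/compare/write pattern an arch implementation
 * of ftrace_make_nop()/ftrace_make_call() follows. arch_make_nop_insn(),
 * arch_make_call_insn() and arch_text_poke() are hypothetical stand-ins
 * for the arch's own instruction builders and text-poking primitive:
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *	unsigned char expect[MCOUNT_INSN_SIZE];
 *	unsigned char nop[MCOUNT_INSN_SIZE];
 *
 *	arch_make_call_insn(expect, rec->ip, addr);	// what should be there now
 *	arch_make_nop_insn(nop);			// what to write instead
 *
 *	if (copy_from_kernel_nofault(cur, (void *)rec->ip, sizeof(cur)))
 *		return -EFAULT;
 *	if (memcmp(cur, expect, sizeof(cur)))
 *		return -EINVAL;
 *	return arch_text_poke((void *)rec->ip, nop, sizeof(nop)) ? -EPERM : 0;
 */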
649 * ftrace_init_nop - initialize a nop call site
650 * @mod: module structure if called by module load initialization
651 * @rec: the call site record (e.g. mcount/fentry)
653 * This is a very sensitive operation and great care needs
654 * to be taken by the arch. The operation should carefully
655 * read the location, check to see if what is read is indeed
656 * what we expect it to be, and then on success of the compare,
657 * it should write to the location.
 * The code segment at @rec->ip should contain the contents created by the compiler.
664 * -EFAULT on error reading the location
665 * -EINVAL on a failed compare of the contents
666 * -EPERM on error writing to the location
667 * Any other value will be considered a failure.
669 #ifndef ftrace_init_nop
670 static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
672 return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
677 * ftrace_make_call - convert a nop call site into a call to addr
678 * @rec: the call site record (e.g. mcount/fentry)
679 * @addr: the address that the call site should call
681 * This is a very sensitive operation and great care needs
682 * to be taken by the arch. The operation should carefully
683 * read the location, check to see if what is read is indeed
684 * what we expect it to be, and then on success of the compare,
685 * it should write to the location.
687 * The code segment at @rec->ip should be a nop
691 * -EFAULT on error reading the location
692 * -EINVAL on a failed compare of the contents
693 * -EPERM on error writing to the location
694 * Any other value will be considered a failure.
696 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
698 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
700 * ftrace_modify_call - convert from one addr to another (no nop)
701 * @rec: the call site record (e.g. mcount/fentry)
702 * @old_addr: the address expected to be currently called to
703 * @addr: the address to change to
705 * This is a very sensitive operation and great care needs
706 * to be taken by the arch. The operation should carefully
707 * read the location, check to see if what is read is indeed
708 * what we expect it to be, and then on success of the compare,
709 * it should write to the location.
711 * The code segment at @rec->ip should be a caller to @old_addr
715 * -EFAULT on error reading the location
716 * -EINVAL on a failed compare of the contents
717 * -EPERM on error writing to the location
718 * Any other value will be considered a failure.
720 extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
723 /* Should never be called */
724 static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
731 /* May be defined in arch */
732 extern int ftrace_arch_read_dyn_info(char *buf, int size);
734 extern int skip_trace(unsigned long ip);
735 extern void ftrace_module_init(struct module *mod);
736 extern void ftrace_module_enable(struct module *mod);
737 extern void ftrace_release_mod(struct module *mod);
739 extern void ftrace_disable_daemon(void);
740 extern void ftrace_enable_daemon(void);
741 #else /* CONFIG_DYNAMIC_FTRACE */
742 static inline int skip_trace(unsigned long ip) { return 0; }
743 static inline int ftrace_force_update(void) { return 0; }
744 static inline void ftrace_disable_daemon(void) { }
745 static inline void ftrace_enable_daemon(void) { }
746 static inline void ftrace_module_init(struct module *mod) { }
747 static inline void ftrace_module_enable(struct module *mod) { }
748 static inline void ftrace_release_mod(struct module *mod) { }
749 static inline int ftrace_text_reserved(const void *start, const void *end)
753 static inline unsigned long ftrace_location(unsigned long ip)
 * Again, users of functions that take an ftrace_ops may not
 * have one defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of an inline.
763 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
764 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
765 #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
766 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
767 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
768 #define ftrace_free_filter(ops) do { } while (0)
769 #define ftrace_ops_set_global_filter(ops) do { } while (0)
771 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
772 size_t cnt, loff_t *ppos) { return -ENODEV; }
773 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
774 size_t cnt, loff_t *ppos) { return -ENODEV; }
776 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
778 static inline bool is_ftrace_trampoline(unsigned long addr)
782 #endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - cannot re-enable after this */
785 void ftrace_kill(void);
787 static inline void tracer_disable(void)
789 #ifdef CONFIG_FUNCTION_TRACER
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
799 static inline int __ftrace_enabled_save(void)
801 #ifdef CONFIG_FUNCTION_TRACER
802 int saved_ftrace_enabled = ftrace_enabled;
804 return saved_ftrace_enabled;
810 static inline void __ftrace_enabled_restore(int enabled)
812 #ifdef CONFIG_FUNCTION_TRACER
813 ftrace_enabled = enabled;
817 /* All archs should have this, but we define it for consistency */
818 #ifndef ftrace_return_address0
819 # define ftrace_return_address0 __builtin_return_address(0)
822 /* Archs may use other ways for ADDR1 and beyond */
823 #ifndef ftrace_return_address
824 # ifdef CONFIG_FRAME_POINTER
825 # define ftrace_return_address(n) __builtin_return_address(n)
827 # define ftrace_return_address(n) 0UL
831 #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
832 #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
833 #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
834 #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
835 #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
836 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
837 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}
851 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
852 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
853 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 * Use defines instead of static inlines because some arches will emit code for
 * the CALLER_ADDR, when we really want these to be a real nop.
859 # define trace_preempt_on(a0, a1) do { } while (0)
860 # define trace_preempt_off(a0, a1) do { } while (0)
863 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
864 extern void ftrace_init(void);
865 #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
866 #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
868 #define FTRACE_CALLSITE_SECTION "__mcount_loc"
871 static inline void ftrace_init(void) { }
875 * Structure that defines an entry function trace.
876 * It's already packed but the attribute "packed" is needed
877 * to remove extra padding at the end.
879 struct ftrace_graph_ent {
880 unsigned long func; /* Current function */
885 * Structure that defines a return function trace.
886 * It's already packed but the attribute "packed" is needed
887 * to remove extra padding at the end.
889 struct ftrace_graph_ret {
890 unsigned long func; /* Current function */
892 /* Number of functions that overran the depth limit for current task */
893 unsigned int overrun;
894 unsigned long long calltime;
895 unsigned long long rettime;
/* Type of the callback handlers for function graph tracing */
899 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
900 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
902 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
904 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
907 trace_func_graph_ent_t entryfunc;
908 trace_func_graph_ret_t retfunc;
 * Stack of return addresses for functions of a thread.
 * Used in struct thread_info
916 struct ftrace_ret_stack {
919 unsigned long long calltime;
920 #ifdef CONFIG_FUNCTION_PROFILER
921 unsigned long long subtime;
923 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
926 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
932 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
934 * Defined in entry_32/64.S
936 extern void return_to_handler(void);
939 function_graph_enter(unsigned long ret, unsigned long func,
940 unsigned long frame_pointer, unsigned long *retp);
942 struct ftrace_ret_stack *
943 ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
945 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
946 unsigned long ret, unsigned long *retp);
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal function
 * tracer if the function graph tracer is not configured.
953 #define __notrace_funcgraph notrace
955 #define FTRACE_RETFUNC_DEPTH 50
956 #define FTRACE_RETSTACK_ALLOC_SIZE 32
958 extern int register_ftrace_graph(struct fgraph_ops *ops);
959 extern void unregister_ftrace_graph(struct fgraph_ops *ops);
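/*
 * For example, a minimal sketch (hypothetical names) hooking both function
 * entry and return with the graph tracer:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// non-zero: also trace this function's return
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time spent in the function
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */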
961 extern bool ftrace_graph_is_dead(void);
962 extern void ftrace_graph_stop(void);
964 /* The current handlers in use */
965 extern trace_func_graph_ret_t ftrace_graph_return;
966 extern trace_func_graph_ent_t ftrace_graph_entry;
968 extern void ftrace_graph_init_task(struct task_struct *t);
969 extern void ftrace_graph_exit_task(struct task_struct *t);
970 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
972 static inline void pause_graph_tracing(void)
	atomic_inc(&current->tracing_graph_pause);
977 static inline void unpause_graph_tracing(void)
	atomic_dec(&current->tracing_graph_pause);
981 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
983 #define __notrace_funcgraph
985 static inline void ftrace_graph_init_task(struct task_struct *t) { }
986 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
987 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
989 /* Define as macros as fgraph_ops may not be defined */
990 #define register_ftrace_graph(ops) ({ -1; })
991 #define unregister_ftrace_graph(ops) do { } while (0)
993 static inline unsigned long
994 ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
1000 static inline void pause_graph_tracing(void) { }
1001 static inline void unpause_graph_tracing(void) { }
1002 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1004 #ifdef CONFIG_TRACING
1006 /* flags for current->trace */
1008 TSK_TRACE_FL_TRACE_BIT = 0,
1009 TSK_TRACE_FL_GRAPH_BIT = 1,
1012 TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
1013 TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
1016 static inline void set_tsk_trace_trace(struct task_struct *tsk)
1018 set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
1021 static inline void clear_tsk_trace_trace(struct task_struct *tsk)
1023 clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
1026 static inline int test_tsk_trace_trace(struct task_struct *tsk)
1028 return tsk->trace & TSK_TRACE_FL_TRACE;
1031 static inline void set_tsk_trace_graph(struct task_struct *tsk)
1033 set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
1036 static inline void clear_tsk_trace_graph(struct task_struct *tsk)
1038 clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
1041 static inline int test_tsk_trace_graph(struct task_struct *tsk)
1043 return tsk->trace & TSK_TRACE_FL_GRAPH;
1046 enum ftrace_dump_mode;
1048 extern enum ftrace_dump_mode ftrace_dump_on_oops;
1049 extern int tracepoint_printk;
1051 extern void disable_trace_on_warning(void);
1052 extern int __disable_trace_on_warning;
1054 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
1055 void *buffer, size_t *lenp, loff_t *ppos);
1057 #else /* CONFIG_TRACING */
1058 static inline void disable_trace_on_warning(void) { }
1059 #endif /* CONFIG_TRACING */
1061 #ifdef CONFIG_FTRACE_SYSCALLS
1063 unsigned long arch_syscall_addr(int nr);
1065 #endif /* CONFIG_FTRACE_SYSCALLS */
1067 #endif /* _LINUX_FTRACE_H */