tracing: Inline tracing_gen_ctx_flags()
[linux-2.6-microblaze.git] / kernel / trace / trace.h
index 1dadef4..93fb08a 100644
@@ -136,25 +136,6 @@ struct kretprobe_trace_entry_head {
        unsigned long           ret_ip;
 };
 
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- *  IRQS_OFF           - interrupts were disabled
- *  IRQS_NOSUPPORT     - arch does not support irqs_disabled_flags
- *  NEED_RESCHED       - reschedule is requested
- *  HARDIRQ            - inside an interrupt handler
- *  SOFTIRQ            - inside a softirq handler
- */
-enum trace_flag_type {
-       TRACE_FLAG_IRQS_OFF             = 0x01,
-       TRACE_FLAG_IRQS_NOSUPPORT       = 0x02,
-       TRACE_FLAG_NEED_RESCHED         = 0x04,
-       TRACE_FLAG_HARDIRQ              = 0x08,
-       TRACE_FLAG_SOFTIRQ              = 0x10,
-       TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
-       TRACE_FLAG_NMI                  = 0x40,
-};
-
 #define TRACE_BUF_SIZE         1024
 
 struct trace_array;
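
The trace_flag_type values removed here move to include/linux/trace_events.h as part of this series, where they are folded into the single trace_ctx word that the reworked prototypes later in this patch take in place of the old (flags, pc) pair. A minimal sketch of how such a word could be packed and unpacked follows; the exact layout (preempt count in the low byte, TRACE_FLAG_* bits above bit 16) is an assumption for illustration and is not shown in this hunk.

/* Illustrative only: assumed packing of a trace_ctx word. */
#define EXAMPLE_CTX_PC_MASK		0xffU	/* low byte: preempt_count snapshot */
#define EXAMPLE_CTX_FLAGS_SHIFT		16	/* upper bits: TRACE_FLAG_* values  */

static inline unsigned int example_pack_trace_ctx(unsigned int trace_flags,
						  unsigned int preempt_cnt)
{
	return (trace_flags << EXAMPLE_CTX_FLAGS_SHIFT) |
	       (preempt_cnt & EXAMPLE_CTX_PC_MASK);
}

static inline void example_unpack_trace_ctx(unsigned int trace_ctx,
					     unsigned char *trace_flags,
					     unsigned char *preempt_cnt)
{
	*preempt_cnt = trace_ctx & EXAMPLE_CTX_PC_MASK;
	*trace_flags = trace_ctx >> EXAMPLE_CTX_FLAGS_SHIFT;
}
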
@@ -558,183 +539,6 @@ struct tracer {
        bool                    noboot;
 };
 
-
-/* Only current can touch trace_recursion */
-
-/*
- * For function tracing recursion:
- *  The order of these bits is important.
- *
- *  When function tracing occurs, the following steps are taken:
- *   If the arch does not support an ftrace feature:
- *    call the internal function (uses INTERNAL bits), which calls...
- *   If a callback is registered to the "global" list, the list
- *    function is called and recursion checks the GLOBAL bits,
- *    then this function calls...
- *   The function callback, which can use the FTRACE bits to
- *    check for recursion.
- *
- * Now if the arch does not support a feature, and it calls
- * the global list function which calls the ftrace callback,
- * all three of these steps will perform a recursion check.
- * There's no reason to repeat a check if the previous caller
- * already did one: the recursion that we are protecting
- * against will go through the same steps again.
- *
- * To prevent multiple recursion checks, if a recursion
- * bit is set that is higher than the MAX bit of the current
- * check, then we know that the check was made by the previous
- * caller, and we can skip the current check.
- */
-enum {
-       /* Function recursion bits */
-       TRACE_FTRACE_BIT,
-       TRACE_FTRACE_NMI_BIT,
-       TRACE_FTRACE_IRQ_BIT,
-       TRACE_FTRACE_SIRQ_BIT,
-
-       /* INTERNAL_BITs must be greater than FTRACE_BITs */
-       TRACE_INTERNAL_BIT,
-       TRACE_INTERNAL_NMI_BIT,
-       TRACE_INTERNAL_IRQ_BIT,
-       TRACE_INTERNAL_SIRQ_BIT,
-
-       TRACE_BRANCH_BIT,
-/*
- * Abuse of the trace_recursion:
- * We need a way to maintain state while tracing the function graph
- * in irq context, because we want to trace a particular function that
- * was called in irq context while irq tracing is off. Since this
- * can only be modified by current, we can reuse trace_recursion.
- */
-       TRACE_IRQ_BIT,
-
-       /* Set if the function is in the set_graph_function file */
-       TRACE_GRAPH_BIT,
-
-       /*
-        * In the very unlikely case that an interrupt came in
-        * at a start of graph tracing, and we want to trace
-        * the function in that interrupt, the depth can be greater
-        * than zero, because of the preempted start of a previous
-        * trace. In an even more unlikely case, depth could be 2
-        * if a softirq interrupted the start of graph tracing,
-        * followed by an interrupt preempting a start of graph
-        * tracing in the softirq, and depth can even be 3
-        * if an NMI came in at the start of an interrupt function
-        * that preempted a softirq start of a function that
-        * preempted normal context!!!! Luckily, it can't be
-        * greater than 3, so the next two bits are a mask
-        * of what the depth is when we set TRACE_GRAPH_BIT
-        */
-
-       TRACE_GRAPH_DEPTH_START_BIT,
-       TRACE_GRAPH_DEPTH_END_BIT,
-
-       /*
-        * To implement set_graph_notrace, if this bit is set, we ignore
-        * function graph tracing of called functions, until the return
-        * function is called to clear it.
-        */
-       TRACE_GRAPH_NOTRACE_BIT,
-
-       /*
-        * When transitioning between contexts, the preempt_count() may
-        * not be correct. Allow for a single recursion to cover this case.
-        */
-       TRACE_TRANSITION_BIT,
-};
-
-#define trace_recursion_set(bit)       do { (current)->trace_recursion |= (1<<(bit)); } while (0)
-#define trace_recursion_clear(bit)     do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
-#define trace_recursion_test(bit)      ((current)->trace_recursion & (1<<(bit)))
-
-#define trace_recursion_depth() \
-       (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
-#define trace_recursion_set_depth(depth) \
-       do {                                                            \
-               current->trace_recursion &=                             \
-                       ~(3 << TRACE_GRAPH_DEPTH_START_BIT);            \
-               current->trace_recursion |=                             \
-                       ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;   \
-       } while (0)
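
For context, a rough sketch of how the depth macros above are used around function graph entry and return, modeled on ftrace_graph_addr() and ftrace_graph_addr_finish(); it is simplified, and trace->depth refers to the depth field of struct ftrace_graph_ent:

	/* Entry of a function listed in set_graph_function: note that graph
	 * tracing started here and remember the depth at that point. */
	trace_recursion_set(TRACE_GRAPH_BIT);
	trace_recursion_set_depth(trace->depth);

	/* On return: only clear the bit once execution is back at the depth
	 * where tracing was started, so interrupted starts are handled. */
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
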
-
-#define TRACE_CONTEXT_BITS     4
-
-#define TRACE_FTRACE_START     TRACE_FTRACE_BIT
-#define TRACE_FTRACE_MAX       ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_LIST_START       TRACE_INTERNAL_BIT
-#define TRACE_LIST_MAX         ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
-
-#define TRACE_CONTEXT_MASK     TRACE_LIST_MAX
-
-static __always_inline int trace_get_context_bit(void)
-{
-       int bit;
-
-       if (in_interrupt()) {
-               if (in_nmi())
-                       bit = 0;
-               else if (in_irq())
-                       bit = 1;
-               else
-                       bit = 2;
-       } else
-               bit = 3;
-
-       return bit;
-}
-
-static __always_inline int trace_test_and_set_recursion(int start, int max)
-{
-       unsigned int val = current->trace_recursion;
-       int bit;
-
-       /* A previous recursion check was made */
-       if ((val & TRACE_CONTEXT_MASK) > max)
-               return 0;
-
-       bit = trace_get_context_bit() + start;
-       if (unlikely(val & (1 << bit))) {
-               /*
-                * It could be that preempt_count has not been updated during
-                * a switch between contexts. Allow for a single recursion.
-                */
-               bit = TRACE_TRANSITION_BIT;
-               if (trace_recursion_test(bit))
-                       return -1;
-               trace_recursion_set(bit);
-               barrier();
-               return bit + 1;
-       }
-
-       /* Normal check passed, clear the transition to allow it again */
-       trace_recursion_clear(TRACE_TRANSITION_BIT);
-
-       val |= 1 << bit;
-       current->trace_recursion = val;
-       barrier();
-
-       return bit + 1;
-}
-
-static __always_inline void trace_clear_recursion(int bit)
-{
-       unsigned int val = current->trace_recursion;
-
-       if (!bit)
-               return;
-
-       bit--;
-       bit = 1 << bit;
-       val &= ~bit;
-
-       barrier();
-       current->trace_recursion = val;
-}
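
A sketch of the intended calling pattern for the two helpers above, loosely modeled on the function tracer callback in kernel/trace/trace_functions.c (signature and body simplified; the real callback also receives struct ftrace_ops and register state):

static void example_function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int bit;

	/* Reserve the recursion bit for the current context (NMI/irq/softirq/normal). */
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;		/* genuine recursion detected: drop this event */

	/* ... record the function entry for @ip / @parent_ip ... */

	trace_clear_recursion(bit);	/* release the bit (no-op if bit == 0) */
}
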
-
 static inline struct ring_buffer_iter *
 trace_buffer_iter(struct trace_iterator *iter, int cpu)
 {
@@ -766,8 +570,7 @@ struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
-                         unsigned long flags,
-                         int pc);
+                         unsigned int trace_ctx);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                                struct trace_array_cpu *data);
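
The caller-side shape implied by the new prototype is sketched below: rather than saving irq flags and reading preempt_count() separately, a write path obtains one trace_ctx word up front and threads it through reserve and commit. tracing_gen_ctx() is assumed to be the helper introduced elsewhere in this series, and the body is illustrative rather than the actual trace_function() implementation.

static void example_write_fn_entry(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long ip, unsigned long parent_ip)
{
	unsigned int trace_ctx = tracing_gen_ctx();	/* assumed helper */
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;
	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}
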
@@ -792,11 +595,11 @@ unsigned long trace_total_entries(struct trace_array *tr);
 void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
-                   unsigned long flags, int pc);
+                   unsigned int trace_ctx);
 void trace_graph_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
-                   unsigned long flags, int pc);
+                   unsigned int trace_ctx);
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
@@ -864,11 +667,10 @@ static inline void latency_fsnotify(struct trace_array *tr) { }
 #endif
 
 #ifdef CONFIG_STACKTRACE
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-                  int pc);
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
 #else
-static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
-                                int skip, int pc)
+static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+                                int skip)
 {
 }
 #endif /* CONFIG_STACKTRACE */
@@ -896,6 +698,8 @@ extern bool ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
+extern void __init disable_tracing_selftest(const char *reason);
+
 extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
 extern int trace_selftest_startup_function_graph(struct tracer *trace,
@@ -919,6 +723,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
  */
 #define __tracer_data          __refdata
 #else
+static inline void __init disable_tracing_selftest(const char *reason)
+{
+}
 /* Tracers are seldom changed. Optimize when selftests are disabled. */
 #define __tracer_data          __read_mostly
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
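
The new declaration and its !CONFIG_FTRACE_STARTUP_TEST stub let callers disable the startup selftests unconditionally. A hypothetical caller (the condition name is invented for illustration):

	/* e.g. from an __init path that knows the selftests would misfire */
	if (example_boot_tracing_active)	/* hypothetical condition */
		disable_tracing_selftest("boot-time tracing is enabled");
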
@@ -1003,10 +810,10 @@ extern void graph_trace_open(struct trace_iterator *iter);
 extern void graph_trace_close(struct trace_iterator *iter);
 extern int __trace_graph_entry(struct trace_array *tr,
                               struct ftrace_graph_ent *trace,
-                              unsigned long flags, int pc);
+                              unsigned int trace_ctx);
 extern void __trace_graph_return(struct trace_array *tr,
                                 struct ftrace_graph_ret *trace,
-                                unsigned long flags, int pc);
+                                unsigned int trace_ctx);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
@@ -1469,15 +1276,15 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct trace_buffer *buffer,
                                     struct ring_buffer_event *event,
-                                    unsigned long flags, int pc,
+                                    unsigned int trace_ctx,
                                     struct pt_regs *regs);
 
 static inline void trace_buffer_unlock_commit(struct trace_array *tr,
                                              struct trace_buffer *buffer,
                                              struct ring_buffer_event *event,
-                                             unsigned long flags, int pc)
+                                             unsigned int trace_ctx)
 {
-       trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
+       trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
 }
 
 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -1538,8 +1345,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -1549,12 +1355,12 @@ static inline void
 event_trigger_unlock_commit(struct trace_event_file *file,
                            struct trace_buffer *buffer,
                            struct ring_buffer_event *event,
-                           void *entry, unsigned long irq_flags, int pc)
+                           void *entry, unsigned int trace_ctx)
 {
        enum event_trigger_type tt = ETT_NONE;
 
        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-               trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+               trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
 
        if (tt)
                event_triggers_post_call(file, tt);
@@ -1566,8 +1372,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
+ * @trace_ctx: The tracing context flags.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -1580,14 +1385,14 @@ static inline void
 event_trigger_unlock_commit_regs(struct trace_event_file *file,
                                 struct trace_buffer *buffer,
                                 struct ring_buffer_event *event,
-                                void *entry, unsigned long irq_flags, int pc,
+                                void *entry, unsigned int trace_ctx,
                                 struct pt_regs *regs)
 {
        enum event_trigger_type tt = ETT_NONE;
 
        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
                trace_buffer_unlock_commit_regs(file->tr, buffer, event,
-                                               irq_flags, pc, regs);
+                                               trace_ctx, regs);
 
        if (tt)
                event_triggers_post_call(file, tt);