tracing: Merge irqflags + preempt counter.
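
The interrupt state and the preemption counter used to travel through the
tracing core as two separate arguments, "unsigned long flags, int pc". This
patch folds both into a single "unsigned int trace_ctx": the TRACE_FLAG_*
bits live in bits 16..31 and the low byte carries preempt_count(). Call
sites change shape accordingly (a recap of the hunks below, not new code):

	/* before */
	event = trace_buffer_lock_reserve(buffer, type, len, flags, pc);

	/* after */
	event = trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
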
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b8a2d78..0b3cce6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -176,7 +176,7 @@ static union trace_eval_map_item *trace_eval_maps;
 int tracing_set_tracer(struct trace_array *tr, const char *buf);
 static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
-                                  unsigned long flags, int pc);
+                                  unsigned int trace_ctx);
 
 #define MAX_TRACER_SIZE                100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -530,6 +530,7 @@ trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
 /**
  * trace_ignore_this_task - should a task be ignored for tracing
  * @filtered_pids: The list of pids to check
+ * @filtered_no_pids: The list of pids not to be traced
  * @task: The task that should be ignored if not filtered
  *
  * Checks if @task should be traced or not from @filtered_pids.
@@ -780,7 +781,7 @@ u64 ftrace_now(int cpu)
 }
 
 /**
- * tracing_is_enabled - Show if global_trace has been disabled
+ * tracing_is_enabled - Show if global_trace has been enabled
  *
  * Shows if the global trace has been enabled or not. It uses the
  * mirror flag "buffer_disabled" to be used in fast paths such as for
@@ -905,23 +906,23 @@ static inline void trace_access_lock_init(void)
 
 #ifdef CONFIG_STACKTRACE
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                unsigned long flags,
-                                int skip, int pc, struct pt_regs *regs);
+                                unsigned int trace_ctx,
+                                int skip, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
-                                     unsigned long flags,
-                                     int skip, int pc, struct pt_regs *regs);
+                                     unsigned int trace_ctx,
+                                     int skip, struct pt_regs *regs);
 
 #else
 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                       unsigned long flags,
-                                       int skip, int pc, struct pt_regs *regs)
+                                       unsigned int trace_ctx,
+                                       int skip, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
-                                     unsigned long flags,
-                                     int skip, int pc, struct pt_regs *regs)
+                                     unsigned int trace_ctx,
+                                     int skip, struct pt_regs *regs)
 {
 }
 
@@ -929,24 +930,24 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 
 static __always_inline void
 trace_event_setup(struct ring_buffer_event *event,
-                 int type, unsigned long flags, int pc)
+                 int type, unsigned int trace_ctx)
 {
        struct trace_entry *ent = ring_buffer_event_data(event);
 
-       tracing_generic_entry_update(ent, type, flags, pc);
+       tracing_generic_entry_update(ent, type, trace_ctx);
 }
 
 static __always_inline struct ring_buffer_event *
 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
-                         unsigned long flags, int pc)
+                         unsigned int trace_ctx)
 {
        struct ring_buffer_event *event;
 
        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
-               trace_event_setup(event, type, flags, pc);
+               trace_event_setup(event, type, trace_ctx);
 
        return event;
 }
@@ -1007,25 +1008,22 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
-       unsigned long irq_flags;
+       unsigned int trace_ctx;
        int alloc;
-       int pc;
 
        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;
 
-       pc = preempt_count();
-
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
 
        alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
-       local_save_flags(irq_flags);
+       trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;
        ring_buffer_nest_start(buffer);
-       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                           irq_flags, pc);
+       event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+                                           trace_ctx);
        if (!event) {
                size = 0;
                goto out;
@@ -1044,7 +1042,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
                entry->buf[size] = '\0';
 
        __buffer_unlock_commit(buffer, event);
-       ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+       ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
  out:
        ring_buffer_nest_end(buffer);
        return size;
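
The __trace_puts() hunk above shows the conversion pattern that repeats
through the rest of the file: two independent snapshots are replaced by a
single tracing_gen_ctx() call, taken after the early bail-out checks so
nothing is sampled when the write is rejected. Side by side (a recap, not
new code):

	/* before: two snapshots, taken at different points */
	pc = preempt_count();
	...
	local_save_flags(irq_flags);

	/* after: one combined snapshot */
	trace_ctx = tracing_gen_ctx();
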
@@ -1061,25 +1059,22 @@ int __trace_bputs(unsigned long ip, const char *str)
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
-       unsigned long irq_flags;
+       unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
        int ret = 0;
-       int pc;
 
        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;
 
-       pc = preempt_count();
-
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
 
-       local_save_flags(irq_flags);
+       trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;
 
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                           irq_flags, pc);
+                                           trace_ctx);
        if (!event)
                goto out;
 
@@ -1088,7 +1083,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        entry->str                      = str;
 
        __buffer_unlock_commit(buffer, event);
-       ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
+       ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 
        ret = 1;
  out:
@@ -2584,36 +2579,69 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
 }
 EXPORT_SYMBOL_GPL(trace_handle_return);
 
-void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
-                            unsigned long flags, int pc)
+unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
 {
-       struct task_struct *tsk = current;
+       unsigned int trace_flags = 0;
+       unsigned int pc;
+
+       pc = preempt_count();
 
-       entry->preempt_count            = pc & 0xff;
-       entry->pid                      = (tsk) ? tsk->pid : 0;
-       entry->type                     = type;
-       entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-               (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+       if (irqs_disabled_flags(irqflags))
+               trace_flags |= TRACE_FLAG_IRQS_OFF;
 #else
-               TRACE_FLAG_IRQS_NOSUPPORT |
+       trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
 #endif
-               ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
-               ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-               ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
-               (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
-               (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
+       if (pc & NMI_MASK)
+               trace_flags |= TRACE_FLAG_NMI;
+       if (pc & HARDIRQ_MASK)
+               trace_flags |= TRACE_FLAG_HARDIRQ;
+
+       if (pc & SOFTIRQ_OFFSET)
+               trace_flags |= TRACE_FLAG_SOFTIRQ;
+
+       if (tif_need_resched())
+               trace_flags |= TRACE_FLAG_NEED_RESCHED;
+       if (test_preempt_need_resched())
+               trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+       return (trace_flags << 16) | (pc & 0xff);
+}
+
+unsigned int tracing_gen_ctx(void)
+{
+       unsigned long irqflags;
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+       local_save_flags(irqflags);
+#else
+       irqflags = 0;
+#endif
+       return tracing_gen_ctx_flags(irqflags);
+}
+
+unsigned int tracing_gen_ctx_dec(void)
+{
+       unsigned int trace_ctx;
+
+       trace_ctx = tracing_gen_ctx();
+
+       /*
+        * Subtract one from the preemption counter if preemption is enabled,
+        * see trace_event_buffer_reserve() for details.
+        */
+       if (IS_ENABLED(CONFIG_PREEMPTION))
+               trace_ctx--;
+       return trace_ctx;
 }
-EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
-                         unsigned long flags, int pc)
+                         unsigned int trace_ctx)
 {
-       return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
+       return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
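
tracing_gen_ctx_flags() above is the packing point: the TRACE_FLAG_* bits go
into the upper half of the word and the low byte carries the preemption
counter. A minimal sketch of how such a packed value is taken apart again --
the helper names are illustrative only, not part of the patch:

	/* trace_ctx layout: bits 16..31 = TRACE_FLAG_*, bits 0..7 = preempt count */
	static inline unsigned short trace_ctx_flags(unsigned int trace_ctx)
	{
		return trace_ctx >> 16;		/* TRACE_FLAG_* bits */
	}

	static inline unsigned char trace_ctx_count(unsigned int trace_ctx)
	{
		return trace_ctx & 0xff;	/* low byte of preempt_count() */
	}

tracing_gen_ctx_dec() decrements the packed value to hide a preempt_disable()
taken by the trace event code itself; as its comment notes, on CONFIG_PREEMPTION
kernels the count in the low byte is at least one at that point (see
trace_event_buffer_reserve()), so the decrement only touches that byte.
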
@@ -2733,7 +2761,7 @@ struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
                          struct trace_event_file *trace_file,
                          int type, unsigned long len,
-                         unsigned long flags, int pc)
+                         unsigned int trace_ctx)
 {
        struct ring_buffer_event *entry;
        int val;
@@ -2746,15 +2774,15 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
                /* Try to use the per cpu buffer first */
                val = this_cpu_inc_return(trace_buffered_event_cnt);
                if (val == 1) {
-                       trace_event_setup(entry, type, flags, pc);
+                       trace_event_setup(entry, type, trace_ctx);
                        entry->array[0] = len;
                        return entry;
                }
                this_cpu_dec(trace_buffered_event_cnt);
        }
 
-       entry = __trace_buffer_lock_reserve(*current_rb,
-                                           type, len, flags, pc);
+       entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+                                           trace_ctx);
        /*
         * If tracing is off, but we have triggers enabled
         * we still need to look at the event data. Use the temp_buffer
@@ -2763,8 +2791,8 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
         */
        if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
                *current_rb = temp_buffer;
-               entry = __trace_buffer_lock_reserve(*current_rb,
-                                                   type, len, flags, pc);
+               entry = __trace_buffer_lock_reserve(*current_rb, type, len,
+                                                   trace_ctx);
        }
        return entry;
 }
@@ -2850,7 +2878,7 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
                ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
        event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
-                                   fbuffer->flags, fbuffer->pc, fbuffer->regs);
+                                   fbuffer->trace_ctx, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -2866,7 +2894,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
                                     struct trace_buffer *buffer,
                                     struct ring_buffer_event *event,
-                                    unsigned long flags, int pc,
+                                    unsigned int trace_ctx,
                                     struct pt_regs *regs)
 {
        __buffer_unlock_commit(buffer, event);
@@ -2877,8 +2905,8 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
         * and mmiotrace, but that's ok if they lose a function or
         * two. They are not that meaningful.
         */
-       ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
-       ftrace_trace_userstack(tr, buffer, flags, pc);
+       ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
+       ftrace_trace_userstack(tr, buffer, trace_ctx);
 }
 
 /*
@@ -2892,9 +2920,8 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 }
 
 void
-trace_function(struct trace_array *tr,
-              unsigned long ip, unsigned long parent_ip, unsigned long flags,
-              int pc)
+trace_function(struct trace_array *tr, unsigned long ip,
+              unsigned long parent_ip, unsigned int trace_ctx)
 {
        struct trace_event_call *call = &event_function;
        struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -2902,7 +2929,7 @@ trace_function(struct trace_array *tr,
        struct ftrace_entry *entry;
 
        event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-                                           flags, pc);
+                                           trace_ctx);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
@@ -2936,8 +2963,8 @@ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct trace_buffer *buffer,
-                                unsigned long flags,
-                                int skip, int pc, struct pt_regs *regs)
+                                unsigned int trace_ctx,
+                                int skip, struct pt_regs *regs)
 {
        struct trace_event_call *call = &event_kernel_stack;
        struct ring_buffer_event *event;
@@ -2984,7 +3011,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
        size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                           sizeof(*entry) + size, flags, pc);
+                                           sizeof(*entry) + size, trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@ -3005,22 +3032,22 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
-                                     unsigned long flags,
-                                     int skip, int pc, struct pt_regs *regs)
+                                     unsigned int trace_ctx,
+                                     int skip, struct pt_regs *regs)
 {
        if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
                return;
 
-       __ftrace_trace_stack(buffer, flags, skip, pc, regs);
+       __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
 }
 
-void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
-                  int pc)
+void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
+                  int skip)
 {
        struct trace_buffer *buffer = tr->array_buffer.buffer;
 
        if (rcu_is_watching()) {
-               __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+               __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
                return;
        }
 
@@ -3034,7 +3061,7 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                return;
 
        rcu_irq_enter_irqson();
-       __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+       __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
        rcu_irq_exit_irqson();
 }
 
@@ -3044,19 +3071,15 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
  */
 void trace_dump_stack(int skip)
 {
-       unsigned long flags;
-
        if (tracing_disabled || tracing_selftest_running)
                return;
 
-       local_save_flags(flags);
-
 #ifndef CONFIG_UNWINDER_ORC
        /* Skip 1 to skip this function. */
        skip++;
 #endif
        __ftrace_trace_stack(global_trace.array_buffer.buffer,
-                            flags, skip, preempt_count(), NULL);
+                            tracing_gen_ctx(), skip, NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
@@ -3065,7 +3088,7 @@ static DEFINE_PER_CPU(int, user_stack_count);
 
 static void
 ftrace_trace_userstack(struct trace_array *tr,
-                      struct trace_buffer *buffer, unsigned long flags, int pc)
+                      struct trace_buffer *buffer, unsigned int trace_ctx)
 {
        struct trace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
@@ -3092,7 +3115,7 @@ ftrace_trace_userstack(struct trace_array *tr,
        __this_cpu_inc(user_stack_count);
 
        event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-                                           sizeof(*entry), flags, pc);
+                                           sizeof(*entry), trace_ctx);
        if (!event)
                goto out_drop_count;
        entry   = ring_buffer_event_data(event);
@@ -3112,7 +3135,7 @@ ftrace_trace_userstack(struct trace_array *tr,
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
 static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
-                                  unsigned long flags, int pc)
+                                  unsigned int trace_ctx)
 {
 }
 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
@@ -3242,9 +3265,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        struct trace_buffer *buffer;
        struct trace_array *tr = &global_trace;
        struct bprint_entry *entry;
-       unsigned long flags;
+       unsigned int trace_ctx;
        char *tbuffer;
-       int len = 0, size, pc;
+       int len = 0, size;
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -3252,7 +3275,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        /* Don't pollute graph traces with trace_vprintk internals */
        pause_graph_tracing();
 
-       pc = preempt_count();
+       trace_ctx = tracing_gen_ctx();
        preempt_disable_notrace();
 
        tbuffer = get_trace_buf();
@@ -3266,12 +3289,11 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
                goto out_put;
 
-       local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-                                           flags, pc);
+                                           trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@ -3281,7 +3303,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        memcpy(entry->buf, tbuffer, sizeof(u32) * len);
        if (!call_filter_check_discard(call, entry, buffer, event)) {
                __buffer_unlock_commit(buffer, event);
-               ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
+               ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
        }
 
 out:
@@ -3304,9 +3326,9 @@ __trace_array_vprintk(struct trace_buffer *buffer,
 {
        struct trace_event_call *call = &event_print;
        struct ring_buffer_event *event;
-       int len = 0, size, pc;
+       int len = 0, size;
        struct print_entry *entry;
-       unsigned long flags;
+       unsigned int trace_ctx;
        char *tbuffer;
 
        if (tracing_disabled || tracing_selftest_running)
@@ -3315,7 +3337,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
        /* Don't pollute graph traces with trace_vprintk internals */
        pause_graph_tracing();
 
-       pc = preempt_count();
+       trace_ctx = tracing_gen_ctx();
        preempt_disable_notrace();
 
 
@@ -3327,11 +3349,10 @@ __trace_array_vprintk(struct trace_buffer *buffer,
 
        len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 
-       local_save_flags(flags);
        size = sizeof(*entry) + len + 1;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                           flags, pc);
+                                           trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
@@ -3340,7 +3361,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
        memcpy(&entry->buf, tbuffer, len + 1);
        if (!call_filter_check_discard(call, entry, buffer, event)) {
                __buffer_unlock_commit(buffer, event);
-               ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
+               ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
        }
 
 out:
@@ -6653,7 +6674,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        enum event_trigger_type tt = ETT_NONE;
        struct trace_buffer *buffer;
        struct print_entry *entry;
-       unsigned long irq_flags;
        ssize_t written;
        int size;
        int len;
@@ -6673,7 +6693,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-       local_save_flags(irq_flags);
        size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 
        /* If less than "<faulted>", then make sure we can still add that */
@@ -6682,7 +6701,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
        buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-                                           irq_flags, preempt_count());
+                                           tracing_gen_ctx());
        if (unlikely(!event))
                /* Ring buffer disabled, return as if not open for write */
                return -EBADF;
@@ -6734,7 +6753,6 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct raw_data_entry *entry;
-       unsigned long irq_flags;
        ssize_t written;
        int size;
        int len;
@@ -6756,14 +6774,13 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 
        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-       local_save_flags(irq_flags);
        size = sizeof(*entry) + cnt;
        if (cnt < FAULT_SIZE_ID)
                size += FAULT_SIZE_ID - cnt;
 
        buffer = tr->array_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
-                                           irq_flags, preempt_count());
+                                           tracing_gen_ctx());
        if (!event)
                /* Ring buffer disabled, return as if not open for write */
                return -EBADF;
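
End to end, a converted call site now reads as below. The unpacking side,
tracing_generic_entry_update(), is dropped from this file (its EXPORT goes
away above); in the same patch it becomes an inline that stores
trace_ctx & 0xff into entry->preempt_count and trace_ctx >> 16 into
entry->flags, mirroring the packing in tracing_gen_ctx_flags():

	unsigned int trace_ctx = tracing_gen_ctx();
	struct ring_buffer_event *event;

	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    trace_ctx);
	if (unlikely(!event))
		return -EBADF;	/* ring buffer disabled */
	/* trace_event_setup() records the packed context into the
	 * trace_entry header of the reserved event. */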