tracepoint: Make rcuidle tracepoint callers use SRCU
author: Joel Fernandes (Google) <joel@joelfernandes.org>
Mon, 30 Jul 2018 22:24:22 +0000 (15:24 -0700)
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>
Mon, 30 Jul 2018 23:13:03 +0000 (19:13 -0400)
In recent tests with IRQ on/off tracepoints, a large performance
overhead ~10% is noticed when running hackbench. This was root-caused to
calls to rcu_irq_enter_irqson and rcu_irq_exit_irqson from the
tracepoint code. Following a long discussion on the list [1] about this,
we concluded that SRCU is a better alternative for use during RCU idle.
Although it does involve extra barriers, it's lighter than the sched-rcu
version which has to do additional RCU calls to notify RCU idle about
entry into RCU sections.

In this patch, we change the underlying implementation of the
trace_*_rcuidle API to use SRCU. This has shown to improve performance
a lot for the high frequency irq enable/disable tracepoints.

Test: Tested idle and preempt/irq tracepoints.

Here are some performance numbers:

With a run of the following 30 times on a single core x86 Qemu instance
with 1GB memory:
hackbench -g 4 -f 2 -l 3000

Completion times in seconds. CONFIG_PROVE_LOCKING=y.

No patches (without this series)
Mean: 3.048
Median: 3.025
Std Dev: 0.064

With Lockdep using irq tracepoints with RCU implementation:
Mean: 3.451   (-11.66 %)
Median: 3.447 (-12.22%)
Std Dev: 0.049

With Lockdep using irq tracepoints with SRCU implementation (this series):
Mean: 3.020   (I would consider the improvement against the "without
       this series" case as just noise).
Median: 3.013
Std Dev: 0.033

[1] https://patchwork.kernel.org/patch/10344297/

[remove rcu_read_lock_sched_notrace as it's the equivalent of
preempt_disable_notrace and is unnecessary to call in tracepoint code]
Link: http://lkml.kernel.org/r/20180730222423.196630-3-joel@joelfernandes.org
Cleaned-up-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
[ Simplified WARN_ON_ONCE() ]
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
include/linux/tracepoint.h
kernel/tracepoint.c

index 19a690b..d9a084c 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/smp.h>
+#include <linux/srcu.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
@@ -33,6 +34,8 @@ struct trace_eval_map {
 
 #define TRACEPOINT_DEFAULT_PRIO        10
 
+extern struct srcu_struct tracepoint_srcu;
+
 extern int
 tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
 extern int
@@ -75,10 +78,16 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
  * probe unregistration and the end of module exit to make sure there is no
  * caller executing a probe when it is freed.
  */
+#ifdef CONFIG_TRACEPOINTS
 static inline void tracepoint_synchronize_unregister(void)
 {
+       synchronize_srcu(&tracepoint_srcu);
        synchronize_sched();
 }
+#else
+static inline void tracepoint_synchronize_unregister(void)
+{ }
+#endif
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 extern int syscall_regfunc(void);
@@ -129,18 +138,31 @@ extern void syscall_unregfunc(void);
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond, rcucheck)                    \
+#define __DO_TRACE(tp, proto, args, cond, rcuidle)                     \
        do {                                                            \
                struct tracepoint_func *it_func_ptr;                    \
                void *it_func;                                          \
                void *__data;                                           \
+               int __maybe_unused idx = 0;                             \
                                                                        \
                if (!(cond))                                            \
                        return;                                         \
-               if (rcucheck)                                           \
-                       rcu_irq_enter_irqson();                         \
-               rcu_read_lock_sched_notrace();                          \
-               it_func_ptr = rcu_dereference_sched((tp)->funcs);       \
+                                                                       \
+               /* srcu can't be used from NMI */                       \
+               WARN_ON_ONCE(rcuidle && in_nmi());                      \
+                                                                       \
+               /* keep srcu and sched-rcu usage consistent */          \
+               preempt_disable_notrace();                              \
+                                                                       \
+               /*                                                      \
+                * For rcuidle callers, use srcu since sched-rcu        \
+                * doesn't work from the idle path.                     \
+                */                                                     \
+               if (rcuidle)                                            \
+                       idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                                                                       \
+               it_func_ptr = rcu_dereference_raw((tp)->funcs);         \
+                                                                       \
                if (it_func_ptr) {                                      \
                        do {                                            \
                                it_func = (it_func_ptr)->func;          \
@@ -148,9 +170,11 @@ extern void syscall_unregfunc(void);
                                ((void(*)(proto))(it_func))(args);      \
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
-               rcu_read_unlock_sched_notrace();                        \
-               if (rcucheck)                                           \
-                       rcu_irq_exit_irqson();                          \
+                                                                       \
+               if (rcuidle)                                            \
+                       srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+                                                                       \
+               preempt_enable_notrace();                               \
        } while (0)
 
 #ifndef MODULE
index 6dc6356..955148d 100644 (file)
@@ -31,6 +31,9 @@
 extern struct tracepoint * const __start___tracepoints_ptrs[];
 extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
+DEFINE_SRCU(tracepoint_srcu);
+EXPORT_SYMBOL_GPL(tracepoint_srcu);
+
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
@@ -67,16 +70,27 @@ static inline void *allocate_probes(int count)
        return p == NULL ? NULL : p->probes;
 }
 
-static void rcu_free_old_probes(struct rcu_head *head)
+static void srcu_free_old_probes(struct rcu_head *head)
 {
        kfree(container_of(head, struct tp_probes, rcu));
 }
 
+static void rcu_free_old_probes(struct rcu_head *head)
+{
+       call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
+}
+
 static inline void release_probes(struct tracepoint_func *old)
 {
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
+               /*
+                * Tracepoint probes are protected by both sched RCU and SRCU,
+                * by calling the SRCU callback in the sched RCU callback we
+                * cover both cases. So let us chain the SRCU and sched RCU
+                * callbacks to wait for both grace periods.
+                */
                call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
        }
 }