Merge tag 'audit-pr-20210215' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoor...
[linux-2.6-microblaze.git] / lib / locking-selftest.c
index 9959ea2..2d85aba 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/rtmutex.h>
+#include <linux/local_lock.h>
 
 /*
  * Change this to 1 if you want to see the failure printouts:
@@ -51,6 +52,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_RWSEM 0x8
 #define LOCKTYPE_WW    0x10
 #define LOCKTYPE_RTMUTEX 0x20
+#define LOCKTYPE_LL    0x40
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -64,6 +66,9 @@ static DEFINE_SPINLOCK(lock_B);
 static DEFINE_SPINLOCK(lock_C);
 static DEFINE_SPINLOCK(lock_D);
 
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
+
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
 static DEFINE_RWLOCK(rwlock_C);
@@ -133,6 +138,8 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
 
 #endif
 
+static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+
 /*
  * non-inlined runtime initializers, to let separate locks share
  * the same lock-class:
@@ -1306,19 +1313,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x)     lockdep_reset_lock(&raw_lock_##x.dep_map)
 # define I_RWLOCK(x)   lockdep_reset_lock(&rwlock_##x.dep_map)
 # define I_MUTEX(x)    lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)    lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)       lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
 #ifdef CONFIG_RT_MUTEXES
 # define I_RTMUTEX(x)  lockdep_reset_lock(&rtmutex_##x.dep_map)
 #endif
 #else
 # define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
 # define I_RWLOCK(x)
 # define I_MUTEX(x)
 # define I_RWSEM(x)
 # define I_WW(x)
+# define I_LOCAL_LOCK(x)
 #endif
 
 #ifndef I_RTMUTEX
@@ -1358,9 +1369,16 @@ static void reset_locks(void)
        I1(A); I1(B); I1(C); I1(D);
        I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
        I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+       I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+       I_LOCAL_LOCK(A);
+
        lockdep_reset();
+
        I2(A); I2(B); I2(C); I2(D);
        init_shared_classes();
+       raw_spin_lock_init(&raw_lock_A);
+       raw_spin_lock_init(&raw_lock_B);
+       local_lock_init(&local_A);
 
        ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
        memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1382,6 +1400,8 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 
        WARN_ON(irqs_disabled());
 
+       debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
        testcase_fn();
        /*
         * Filter out expected failures:
@@ -1402,7 +1422,7 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
        }
        testcase_total++;
 
-       if (debug_locks_verbose)
+       if (debug_locks_verbose & lockclass_mask)
                pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
                        lockclass_mask, debug_locks, expected);
        /*
@@ -2419,6 +2439,311 @@ static void fs_reclaim_tests(void)
        pr_cont("\n");
 }
 
+/*
+ * Scope-exit guards: a dummy variable annotated with the GCC/Clang
+ * cleanup attribute runs the named function when it goes out of scope,
+ * so each *_CONTEXT() macro below is automatically exited at the end of
+ * the enclosing block.
+ */
+#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+
+/* cleanup callback; the guard variable's address is ignored */
+static void hardirq_exit(int *_)
+{
+       HARDIRQ_EXIT();
+}
+
+/* enter hardirq context for the remainder of the enclosing scope */
+#define HARDIRQ_CONTEXT(name, ...)                                     \
+       int hardirq_guard_##name __guard(hardirq_exit);                 \
+       HARDIRQ_ENTER();
+
+/*
+ * Like HARDIRQ_CONTEXT() but enters the hardirq by hand with IRQs
+ * disabled (__irq_enter() directly), i.e. a non-threaded interrupt.
+ */
+#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...)                         \
+       int notthreaded_hardirq_guard_##name __guard(hardirq_exit);     \
+       local_irq_disable();                                            \
+       __irq_enter();                                                  \
+       WARN_ON(!in_irq());
+
+/* scoped softirq and RCU read-side contexts, same __guard pattern as above */
+static void softirq_exit(int *_)
+{
+       SOFTIRQ_EXIT();
+}
+
+#define SOFTIRQ_CONTEXT(name, ...)                             \
+       int softirq_guard_##name __guard(softirq_exit);         \
+       SOFTIRQ_ENTER();
+
+static void rcu_exit(int *_)
+{
+       rcu_read_unlock();
+}
+
+#define RCU_CONTEXT(name, ...)                                 \
+       int rcu_guard_##name __guard(rcu_exit);                 \
+       rcu_read_lock();
+
+static void rcu_bh_exit(int *_)
+{
+       rcu_read_unlock_bh();
+}
+
+#define RCU_BH_CONTEXT(name, ...)                              \
+       int rcu_bh_guard_##name __guard(rcu_bh_exit);           \
+       rcu_read_lock_bh();
+
+static void rcu_sched_exit(int *_)
+{
+       rcu_read_unlock_sched();
+}
+
+#define RCU_SCHED_CONTEXT(name, ...)                           \
+       int rcu_sched_guard_##name __guard(rcu_sched_exit);     \
+       rcu_read_lock_sched();
+
+static void rcu_callback_exit(int *_)
+{
+       rcu_lock_release(&rcu_callback_map);
+}
+
+/* simulate running inside an RCU callback via the rcu_callback_map dep_map */
+#define RCU_CALLBACK_CONTEXT(name, ...)                                        \
+       int rcu_callback_guard_##name __guard(rcu_callback_exit);       \
+       rcu_lock_acquire(&rcu_callback_map);
+
+
+/*
+ * Scoped lock contexts: the guard variable holds a pointer to the lock
+ * so the cleanup callback can release it on scope exit.
+ */
+static void raw_spinlock_exit(raw_spinlock_t **lock)
+{
+       raw_spin_unlock(*lock);
+}
+
+#define RAW_SPINLOCK_CONTEXT(name, lock)                                               \
+       raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
+       raw_spin_lock(&(lock));
+
+static void spinlock_exit(spinlock_t **lock)
+{
+       spin_unlock(*lock);
+}
+
+#define SPINLOCK_CONTEXT(name, lock)                                           \
+       spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock);     \
+       spin_lock(&(lock));
+
+static void mutex_exit(struct mutex **lock)
+{
+       mutex_unlock(*lock);
+}
+
+#define MUTEX_CONTEXT(name, lock)                                      \
+       struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
+       mutex_lock(&(lock));
+
+/*
+ * Generates a testcase named inner##_in_##outer() that enters the outer
+ * context and then opens the inner context in a nested block, so the
+ * inner context's guard releases it before the outer one exits.
+ */
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock)      \
+                                                                               \
+static void __maybe_unused inner##_in_##outer(void)                            \
+{                                                                              \
+       outer##_CONTEXT(_, outer_lock);                                         \
+       {                                                                       \
+               inner##_CONTEXT(_, inner_lock);                                 \
+       }                                                                       \
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ *       \  inner |  RCU  | RAW_SPIN | SPIN | MUTEX
+ * outer  \       |       |          |      |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ        |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ|   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ        |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU            |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH         |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_CALLBACK   |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED      |   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN       |   o   |    o     |  x   |  x
+ * ---------------+-------+----------+------+-------
+ * SPIN           |   o   |    o     |  o   |  x
+ * ---------------+-------+----------+------+-------
+ * MUTEX          |   o   |    o     |  o   |  o
+ * ---------------+-------+----------+------+-------
+ */
+
+/*
+ * Instantiate one testcase per outer context for a given inner context.
+ * Outer lock contexts use the *_A locks; the inner instantiations below
+ * use the *_B locks so inner and outer never take the same lock.
+ */
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock)           \
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock)                      \
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock)          \
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock)                      \
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)                          \
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)                       \
+GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock)                 \
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)                    \
+GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock)       \
+GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)               \
+GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer)                   \
+       dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);               \
+       dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);        \
+       dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);            \
+       dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX);              \
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer)           \
+       dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);               \
+       dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);        \
+       dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);            \
+       dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);              \
+
+/* the outer doesn't allows any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer)                       \
+       dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);               \
+       dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);        \
+       dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN);            \
+       dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);              \
+
+/*
+ * Run the wait-context matrix (see the table above): for every outer
+ * context, check that acquiring each inner lock type produces the
+ * expected lockdep verdict under CONFIG_PROVE_RAW_LOCK_NESTING.
+ */
+static void wait_context_tests(void)
+{
+       printk("  --------------------------------------------------------------------------\n");
+       printk("  | wait context tests |\n");
+       printk("  --------------------------------------------------------------------------\n");
+       printk("                                 | rcu  | raw  | spin |mutex |\n");
+       printk("  --------------------------------------------------------------------------\n");
+       print_testname("in hardirq context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+       pr_cont("\n");
+
+       print_testname("in hardirq context (not threaded)");
+       DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+       pr_cont("\n");
+
+       print_testname("in softirq context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+       pr_cont("\n");
+
+       print_testname("in RCU context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+       pr_cont("\n");
+
+       print_testname("in RCU-bh context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+       pr_cont("\n");
+
+       print_testname("in RCU callback context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
+       pr_cont("\n");
+
+       print_testname("in RCU-sched context");
+       DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+       pr_cont("\n");
+
+       print_testname("in RAW_SPINLOCK context");
+       DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+       pr_cont("\n");
+
+       print_testname("in SPINLOCK context");
+       DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+       pr_cont("\n");
+
+       print_testname("in MUTEX context");
+       DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+       pr_cont("\n");
+}
+
+/*
+ * local_A is taken with IRQs on, lock_A is taken both in-IRQ and (with
+ * IRQs disabled) around local_A. lockdep must NOT report an
+ * IN-IRQ <-> IRQ-ON inversion for local_A here, i.e. local_lock must
+ * not participate in IRQ-inversion detection; expected result: SUCCESS.
+ */
+static void local_lock_2(void)
+{
+       local_lock_acquire(&local_A);   /* IRQ-ON */
+       local_lock_release(&local_A);
+
+       HARDIRQ_ENTER();
+       spin_lock(&lock_A);             /* IN-IRQ */
+       spin_unlock(&lock_A);
+       HARDIRQ_EXIT()
+
+       HARDIRQ_DISABLE();
+       spin_lock(&lock_A);
+       local_lock_acquire(&local_A);   /* IN-IRQ <-> IRQ-ON cycle, false */
+       local_lock_release(&local_A);
+       spin_unlock(&lock_A);
+       HARDIRQ_ENABLE();
+}
+
+/*
+ * Like local_lock_2 but lock_B is taken inside local_A with IRQs on
+ * first. The lock_A -> local_A chain with IRQs disabled would only
+ * close an IN-IRQ <-> IRQ-ON cycle if local_lock counted as a real
+ * link; it must not, so expected result: SUCCESS.
+ */
+static void local_lock_3A(void)
+{
+       local_lock_acquire(&local_A);   /* IRQ-ON */
+       spin_lock(&lock_B);             /* IRQ-ON */
+       spin_unlock(&lock_B);
+       local_lock_release(&local_A);
+
+       HARDIRQ_ENTER();
+       spin_lock(&lock_A);             /* IN-IRQ */
+       spin_unlock(&lock_A);
+       HARDIRQ_EXIT()
+
+       HARDIRQ_DISABLE();
+       spin_lock(&lock_A);
+       local_lock_acquire(&local_A);   /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+       local_lock_release(&local_A);
+       spin_unlock(&lock_A);
+       HARDIRQ_ENABLE();
+}
+
+/*
+ * Same setup as local_lock_3A, but additionally takes lock_B under
+ * lock_A with IRQs disabled: lock_B was IRQ-ON (taken inside local_A)
+ * and lock_A is IN-IRQ, so this genuine spinlock chain DOES close an
+ * IN-IRQ <-> IRQ-ON cycle; expected result: FAILURE (splat expected).
+ */
+static void local_lock_3B(void)
+{
+       local_lock_acquire(&local_A);   /* IRQ-ON */
+       spin_lock(&lock_B);             /* IRQ-ON */
+       spin_unlock(&lock_B);
+       local_lock_release(&local_A);
+
+       HARDIRQ_ENTER();
+       spin_lock(&lock_A);             /* IN-IRQ */
+       spin_unlock(&lock_A);
+       HARDIRQ_EXIT()
+
+       HARDIRQ_DISABLE();
+       spin_lock(&lock_A);
+       local_lock_acquire(&local_A);   /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+       local_lock_release(&local_A);
+       spin_unlock(&lock_A);
+       HARDIRQ_ENABLE();
+
+       HARDIRQ_DISABLE();
+       spin_lock(&lock_A);
+       spin_lock(&lock_B);             /* IN-IRQ <-> IRQ-ON cycle, true */
+       spin_unlock(&lock_B);
+       spin_unlock(&lock_A);
+       /*
+        * Was HARDIRQ_DISABLE() (copy-paste slip): that left hardirqs
+        * disabled on return, tripping the WARN_ON(irqs_disabled()) at
+        * the start of the next dotest(). Balance with ENABLE instead.
+        */
+       HARDIRQ_ENABLE();
+}
+
+/* driver for the local_lock testcases above; all are LOCKTYPE_LL */
+static void local_lock_tests(void)
+{
+       printk("  --------------------------------------------------------------------------\n");
+       printk("  | local_lock tests |\n");
+       printk("  ---------------------\n");
+
+       print_testname("local_lock inversion  2");
+       dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+       pr_cont("\n");
+
+       print_testname("local_lock inversion 3A");
+       dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+       pr_cont("\n");
+
+       print_testname("local_lock inversion 3B");
+       dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+       pr_cont("\n");
+}
+
 void locking_selftest(void)
 {
        /*
@@ -2446,7 +2771,6 @@ void locking_selftest(void)
        printk("  --------------------------------------------------------------------------\n");
 
        init_shared_classes();
-       debug_locks_silent = !debug_locks_verbose;
        lockdep_set_selftest_task(current);
 
        DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2542,6 +2866,12 @@ void locking_selftest(void)
 
        fs_reclaim_tests();
 
+       /* Wait context test cases that are specific for RAW_LOCK_NESTING */
+       if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+               wait_context_tests();
+
+       local_lock_tests();
+
        if (unexpected_testcase_failures) {
                printk("-----------------------------------------------------------------\n");
                debug_locks = 0;