refscale: Convert test_lock spinlock to raw_spinlock
author	Zqiang <qiang1.zhang@intel.com>	Sun, 12 Jun 2022 02:02:25 +0000 (10:02 +0800)
committer	Paul E. McKenney <paulmck@kernel.org>	Tue, 21 Jun 2022 22:57:04 +0000 (15:57 -0700)
In kernels built with CONFIG_PREEMPT_RT=y, non-raw spinlocks are replaced
by rt_mutex-based sleeping locks.  This means that acquiring a non-raw
spinlock in a critical section where preemption is disabled can trigger
the following BUG:

BUG: scheduling while atomic: ref_scale_reade/76/0x00000002
Preemption disabled at:
ref_lock_section+0x16/0x80
Call Trace:
<TASK>
dump_stack_lvl+0x5b/0x82
dump_stack+0x10/0x12
__schedule_bug.cold+0x9c/0xad
__schedule+0x839/0xc00
schedule_rtlock+0x22/0x40
rtlock_slowlock_locked+0x460/0x1350
rt_spin_lock+0x61/0xe0
ref_lock_section+0x29/0x80
rcu_scale_one_reader+0x52/0x60
ref_scale_reader+0x28d/0x490
kthread+0x128/0x150
ret_from_fork+0x22/0x30
</TASK>
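
For reference, the offending pre-patch pattern, as seen in
ref_lock_section() in the diff below, boils down to:

	static DEFINE_SPINLOCK(test_lock);

	static void ref_lock_section(const int nloops)
	{
		int i;

		preempt_disable();               /* Enter a non-sleepable atomic context. */
		for (i = nloops; i >= 0; i--) {
			spin_lock(&test_lock);   /* rt_mutex on PREEMPT_RT: may sleep here. */
			spin_unlock(&test_lock);
		}
		preempt_enable();
	}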

This commit therefore converts test_lock from spinlock_t to
raw_spinlock_t, which remains a true spinning lock even in
CONFIG_PREEMPT_RT=y kernels, and may therefore be acquired with
preemption disabled.
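
After the conversion, the same section, excerpted from the diff below,
reads:

	static DEFINE_RAW_SPINLOCK(test_lock);

	static void ref_lock_section(const int nloops)
	{
		int i;

		preempt_disable();
		for (i = nloops; i >= 0; i--) {
			raw_spin_lock(&test_lock);   /* Spins even on PREEMPT_RT; never sleeps. */
			raw_spin_unlock(&test_lock);
		}
		preempt_enable();
	}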

Signed-off-by: Zqiang <qiang1.zhang@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/refscale.c

diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 909644a..435c884 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -385,7 +385,7 @@ static struct ref_scale_ops rwsem_ops = {
 };
 
 // Definitions for global spinlock
-static DEFINE_SPINLOCK(test_lock);
+static DEFINE_RAW_SPINLOCK(test_lock);
 
 static void ref_lock_section(const int nloops)
 {
@@ -393,8 +393,8 @@ static void ref_lock_section(const int nloops)
 
        preempt_disable();
        for (i = nloops; i >= 0; i--) {
-               spin_lock(&test_lock);
-               spin_unlock(&test_lock);
+               raw_spin_lock(&test_lock);
+               raw_spin_unlock(&test_lock);
        }
        preempt_enable();
 }
@@ -405,9 +405,9 @@ static void ref_lock_delay_section(const int nloops, const int udl, const int nd
 
        preempt_disable();
        for (i = nloops; i >= 0; i--) {
-               spin_lock(&test_lock);
+               raw_spin_lock(&test_lock);
                un_delay(udl, ndl);
-               spin_unlock(&test_lock);
+               raw_spin_unlock(&test_lock);
        }
        preempt_enable();
 }
@@ -427,8 +427,8 @@ static void ref_lock_irq_section(const int nloops)
 
        preempt_disable();
        for (i = nloops; i >= 0; i--) {
-               spin_lock_irqsave(&test_lock, flags);
-               spin_unlock_irqrestore(&test_lock, flags);
+               raw_spin_lock_irqsave(&test_lock, flags);
+               raw_spin_unlock_irqrestore(&test_lock, flags);
        }
        preempt_enable();
 }
@@ -440,9 +440,9 @@ static void ref_lock_irq_delay_section(const int nloops, const int udl, const in
 
        preempt_disable();
        for (i = nloops; i >= 0; i--) {
-               spin_lock_irqsave(&test_lock, flags);
+               raw_spin_lock_irqsave(&test_lock, flags);
                un_delay(udl, ndl);
-               spin_unlock_irqrestore(&test_lock, flags);
+               raw_spin_unlock_irqrestore(&test_lock, flags);
        }
        preempt_enable();
 }