Steve reported that lockdep_assert*irq*(), when nested inside lockdep
itself, will trigger a false-positive.
One example is the stack-trace code, as called from inside lockdep,
triggering tracing, which in turn calls RCU, which then uses
lockdep_assert_irqs_disabled().
Fixes: a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
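
To make the failure mode concrete before the diff: below is a minimal
user-space model of the recursion false-positive and of the per-CPU
guard this patch introduces. The names mirror the kernel's, but all of
it (rcu_helper(), lockdep_internal(), the globals) is an illustrative
sketch, not kernel code:

    #include <assert.h>
    #include <stdio.h>

    static int debug_locks = 1;
    static int hardirqs_enabled;           /* models the per-CPU variable */
    static unsigned int lockdep_recursion; /* models the new per-CPU counter */

    /* Old assertion: fires even while lockdep itself is running. */
    #define lockdep_assert_irqs_disabled_old() \
            assert(!(debug_locks && hardirqs_enabled))

    /* New assertion: suppressed while lockdep_recursion is non-zero. */
    #define __lockdep_enabled (debug_locks && !lockdep_recursion)
    #define lockdep_assert_irqs_disabled() \
            assert(!(__lockdep_enabled && hardirqs_enabled))

    /* Stands in for RCU code reached via tracing from inside lockdep. */
    static void rcu_helper(void)
    {
            lockdep_assert_irqs_disabled(); /* must stay quiet when nested */
    }

    /*
     * Stands in for lockdep internals (e.g. saving a stack trace) calling
     * into tracing -> RCU while lockdep's own IRQ bookkeeping is stale.
     */
    static void lockdep_internal(void)
    {
            lockdep_recursion++;
            rcu_helper();
            lockdep_recursion--;
    }

    int main(void)
    {
            hardirqs_enabled = 1;   /* bookkeeping says "enabled"... */
            lockdep_internal();     /* ...yet the nested assert passes */
            printf("no false positive\n");
            return 0;
    }

With the _old() variant in rcu_helper() the assert would trip, which is
the false positive Steve saw; the __lockdep_enabled form is inert while
the recursion counter is raised.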
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
/*
 * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
 * per-cpu variables. This is required because this_cpu_read() will potentially
 * call into preempt/irq-disable and that obviously isn't right. This is also
 * correct because when IRQs are enabled, it doesn't matter if we accidentally
 * read the value from our previous CPU.
 */
+#define __lockdep_enabled (debug_locks && !raw_cpu_read(lockdep_recursion))
+
#define lockdep_assert_irqs_enabled() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_irqs_disabled() \
do { \
- WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && raw_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_in_irq() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
+ WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirq_context)); \
} while (0)
#define lockdep_assert_preemption_enabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() != 0 || \
!raw_cpu_read(hardirqs_enabled))); \
} while (0)
#define lockdep_assert_preemption_disabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() == 0 && \
raw_cpu_read(hardirqs_enabled))); \
} while (0)
#define lock_stat 0
#endif
+DEFINE_PER_CPU(unsigned int, lockdep_recursion);
+EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
+
+static inline bool lockdep_enabled(void)
+{
+ if (!debug_locks)
+ return false;
+
+ if (raw_cpu_read(lockdep_recursion))
+ return false;
+
+ if (current->lockdep_recursion)
+ return false;
+
+ return true;
+}
+
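
Note that lockdep_enabled() consults two counters: the new per-CPU one
for lockdep's internal recursion (valid because every site that raises
it below does so with IRQs disabled, so the task cannot migrate), and
the per-task current->lockdep_recursion, which remains in place for
lockdep_off()/lockdep_on(), since those can be called from preemptible
context. For orientation only, the per-task pair has roughly this shape
in this era (approximate, not part of this patch):

    void lockdep_off(void)
    {
            current->lockdep_recursion++;
    }

    void lockdep_on(void)
    {
            current->lockdep_recursion--;
    }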
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
arch_spin_lock(&__lock);
__owner = current;
- current->lockdep_recursion++;
+ __this_cpu_inc(lockdep_recursion);
}
static inline void lockdep_unlock(void)
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
- current->lockdep_recursion--;
+ __this_cpu_dec(lockdep_recursion);
__owner = NULL;
arch_spin_unlock(&__lock);
}
task->lockdep_recursion = 0;
}
+static __always_inline void lockdep_recursion_inc(void)
+{
+ __this_cpu_inc(lockdep_recursion);
+}
+
static __always_inline void lockdep_recursion_finish(void)
{
- if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
- current->lockdep_recursion = 0;
+ if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
+ __this_cpu_write(lockdep_recursion, 0);
}
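
These two helpers bracket every external entry point converted below:
lockdep_recursion_inc() marks that we are inside lockdep, and
lockdep_recursion_finish() decrements and, if the counter did not
return to zero, warns and resets it. The call sites all share one
shape; a condensed sketch (the function name is made up for
illustration):

    void lockdep_entry_example(void)
    {
            unsigned long flags;

            if (unlikely(!lockdep_enabled()))  /* also blocks re-entry */
                    return;

            raw_local_irq_save(flags);         /* no migration from here on */
            check_flags(flags);
            lockdep_recursion_inc();           /* nested asserts now inert */
            /* ... call the relevant __lock_*() worker ... */
            lockdep_recursion_finish();        /* dec + warn if unbalanced */
            raw_local_irq_restore(flags);
    }

Keeping IRQs disabled across the inc/finish window is what makes a
per-CPU counter a safe replacement for the per-task one here.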
void lockdep_set_selftest_task(struct task_struct *task)
if (unlikely(in_nmi()))
return;
- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+ if (unlikely(__this_cpu_read(lockdep_recursion)))
return;
if (unlikely(lockdep_hardirqs_enabled())) {
current->hardirq_chain_key = current->curr_chain_key;
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__trace_hardirqs_on_caller();
lockdep_recursion_finish();
}
- if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+ if (unlikely(__this_cpu_read(lockdep_recursion)))
return;
if (lockdep_hardirqs_enabled()) {
if (in_nmi()) {
if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
return;
- } else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+ } else if (__this_cpu_read(lockdep_recursion))
{
struct irqtrace_events *trace = &current->irqtrace;
- if (unlikely(!debug_locks || current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
/*
* We'll do an OFF -> ON transition:
*/
*/
void lockdep_softirqs_off(unsigned long ip)
{
- if (unlikely(!debug_locks || current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
if (subclass) {
unsigned long flags;
- if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
register_lock_class(lock, subclass, 1);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
static bool lockdep_nmi(void)
{
- if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+ if (raw_cpu_read(lockdep_recursion))
return false;
if (!in_nmi())
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
- if (unlikely(current->lockdep_recursion)) {
+ if (!debug_locks)
+ return;
+
+ if (unlikely(!lockdep_enabled())) {
/* XXX allow trylock from NMI ?!? */
if (lockdep_nmi() && !trylock) {
struct held_lock hlock;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
lockdep_recursion_finish();
trace_lock_release(lock, ip);
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
unsigned long flags;
int ret = 0;
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return 1; /* avoid false negative lockdep_assert_held() */
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
ret = __lock_is_held(lock, read);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
struct pin_cookie cookie = NIL_COOKIE;
unsigned long flags;
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return cookie;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
cookie = __lock_pin_lock(lock);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__lock_repin_lock(lock, cookie);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__lock_unpin_lock(lock, cookie);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
trace_lock_contended(lock, ip);
- if (unlikely(!lock_stat || !debug_locks))
- return;
-
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lock_stat || !lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__lock_contended(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
trace_lock_acquired(lock, ip);
- if (unlikely(!lock_stat || !debug_locks))
- return;
-
- if (unlikely(current->lockdep_recursion))
+ if (unlikely(!lock_stat || !lockdep_enabled()))
return;
raw_local_irq_save(flags);
check_flags(flags);
- current->lockdep_recursion++;
+ lockdep_recursion_inc();
__lock_acquired(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);