diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6a584b3..f559487 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -54,7 +54,11 @@ struct lock_list {
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
-       int                             distance;
+       u16                             distance;
+       /* bitmap of different dependencies from head to this */
+       u8                              dep;
+       /* used by BFS to record whether "prev -> this" only has -(*R)-> */
+       u8                              only_xr;
 
        /*
         * The parent field is used to implement breadth-first search, and the
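The new ->dep field is a bitmap over the four possible dependency kinds between a pair of locks: -(SR)->, -(ER)->, -(SN)-> and -(EN)->, where E/S says whether the earlier lock is held Exclusively or Shared, and R/N says whether the later acquisition is a Recursive read or Non-recursive. A sketch of the encoding, modelled on what kernel/locking/lockdep.c does in the same series (names and values below are that file's business, not this header's):

    /*
     * Encoding sketch for lock_list::dep: for a dependency prev -> next,
     * bit0 is set when prev is held exclusively (prev->read == 0) and
     * bit1 is set when next is non-recursive (next->read != 2).
     */
    #define DEP_SR_BIT (0 + (0 << 1))   /* 0: Shared -> Recursive read */
    #define DEP_ER_BIT (1 + (0 << 1))   /* 1: Exclusive -> Recursive read */
    #define DEP_SN_BIT (0 + (1 << 1))   /* 2: Shared -> Non-recursive */
    #define DEP_EN_BIT (1 + (1 << 1))   /* 3: Exclusive -> Non-recursive */

    static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
    {
            return 1U << ((prev->read == 0) + ((next->read != 2) << 1));
    }

With u16 distance, u8 dep and u8 only_xr packed together, the three new members fit in the four bytes that the old int distance occupied.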
@@ -469,6 +473,20 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 }
 #endif
 
+/* Variable used to make lockdep treat read_lock() as recursive in selftests */
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+extern unsigned int force_read_lock_recursive;
+#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+#define force_read_lock_recursive 0
+#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
+
+#ifdef CONFIG_LOCKDEP
+extern bool read_lock_is_recursive(void);
+#else /* CONFIG_LOCKDEP */
+/* If !LOCKDEP, the value is meaningless */
+#define read_lock_is_recursive() 0
+#endif
+
 /*
  * For trivial one-depth nesting of a lock-class, the following
  * global define can be used. (Subsystems with multiple levels
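The 0 fallback above only matters for the rwlock_acquire_read() selection below; with lockdep enabled, the real predicate lives in kernel/locking/lockdep.c. Roughly (a sketch of that definition, details may differ by tree):

    #ifdef CONFIG_LOCKDEP
    /*
     * read_lock() is recursive if:
     *   1. We force lockdep to think it is (for the selftests), or
     *   2. The implementation is not queued read/write lock, or
     *   3. The locker is at an in_interrupt() context.
     */
    bool read_lock_is_recursive(void)
    {
            return force_read_lock_recursive ||
                   !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
                   in_interrupt();
    }
    EXPORT_SYMBOL_GPL(read_lock_is_recursive);
    #endif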
@@ -490,7 +508,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define spin_release(l, i)                     lock_release(l, i)
 
 #define rwlock_acquire(l, s, t, i)             lock_acquire_exclusive(l, s, t, NULL, i)
-#define rwlock_acquire_read(l, s, t, i)                lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i)                                        \
+do {                                                                   \
+       if (read_lock_is_recursive())                                   \
+               lock_acquire_shared_recursive(l, s, t, NULL, i);        \
+       else                                                            \
+               lock_acquire_shared(l, s, t, NULL, i);                  \
+} while (0)
+
 #define rwlock_release(l, i)                   lock_release(l, i)
 
 #define seqcount_acquire(l, s, t, i)           lock_acquire_exclusive(l, s, t, NULL, i)
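For reference, the two annotations that rwlock_acquire_read() now chooses between differ only in the read argument they pass down to lock_acquire(); their definitions appear earlier in this same header:

    #define lock_acquire_shared(l, s, t, n, i)           lock_acquire(l, s, t, 1, 1, n, i)
    #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)

That is, read == 1 marks a shared but non-recursive reader (one that can queue behind a pending writer), while read == 2 marks a recursive reader that is never blocked by a writer arriving between two reads in the same context.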
@@ -512,19 +537,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define lock_map_release(l)                    lock_release(l, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock)                                              \
+# define might_lock(lock)                                              \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
 } while (0)
-# define might_lock_read(lock)                                                 \
+# define might_lock_read(lock)                                         \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
 } while (0)
-# define might_lock_nested(lock, subclass)                             \
+# define might_lock_nested(lock, subclass)                             \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
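As a usage sketch (struct foo and foo_inc() are hypothetical, purely for illustration): might_lock() lets a function that only conditionally takes a lock report the dependency to lockdep on every call, so the rare path is validated even when it is never exercised at runtime:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct foo {
            spinlock_t lock;
            int count;
    };

    static void foo_inc(struct foo *f, bool fast)
    {
            might_lock(&f->lock);   /* fake acquire+release for lockdep */

            if (!fast) {
                    spin_lock(&f->lock);    /* slow path really takes it */
                    f->count++;
                    spin_unlock(&f->lock);
            }
    }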
@@ -534,44 +559,39 @@ do {                                                                      \
 
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
 
-/*
- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
- * per-cpu variables. This is required because this_cpu_read() will potentially
- * call into preempt/irq-disable and that obviously isn't right. This is also
- * correct because when IRQs are enabled, it doesn't matter if we accidentally
- * read the value from our previous CPU.
- */
+#define __lockdep_enabled      (debug_locks && !this_cpu_read(lockdep_recursion))
 
 #define lockdep_assert_irqs_enabled()                                  \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));   \
+       WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_irqs_disabled()                                 \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));    \
+       WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_in_irq()                                                \
 do {                                                                   \
-       WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));    \
+       WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
 } while (0)
 
 #define lockdep_assert_preemption_enabled()                            \
 do {                                                                   \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
-                    debug_locks                        &&              \
+                    __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
-                     !raw_cpu_read(hardirqs_enabled)));                \
+                     !this_cpu_read(hardirqs_enabled)));               \
 } while (0)
 
 #define lockdep_assert_preemption_disabled()                           \
 do {                                                                   \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
-                    debug_locks                        &&              \
+                    __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
-                     raw_cpu_read(hardirqs_enabled)));                 \
+                     this_cpu_read(hardirqs_enabled)));                \
 } while (0)
 
 #else
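A usage sketch for the reworked asserts (account_event() and nr_events are hypothetical): a helper that requires preemption disabled can document and enforce that contract in one line, and with lockdep_recursion folded into __lockdep_enabled the check now also stays quiet while lockdep itself is running:

    #include <linux/lockdep.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, nr_events);

    static void account_event(void)
    {
            /* Callers must have preemption disabled (or hold a spinlock). */
            lockdep_assert_preemption_disabled();
            __this_cpu_inc(nr_events);      /* no migration, so this is safe */
    }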