/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
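
/*
 * As an illustration (not part of the expansion machinery itself): for
 * the HARDIRQ state from lockdep_states.h, LOCKDEP_STATE() above
 * expands to
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *
 * and likewise for SOFTIRQ, giving four usage bits per IRQ state.
 */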

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
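
/*
 * Given the enum layout above, a usage bit decomposes as: bit 0
 * selects READ, bit 1 selects USED_IN vs. ENABLED, and the remaining
 * bits select the IRQ state.  For example:
 *
 *	LOCK_USED_IN_HARDIRQ_READ & LOCK_USAGE_READ_MASK  == 1
 *	LOCK_ENABLED_HARDIRQ      & LOCK_USAGE_DIR_MASK   == 2
 */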

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
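
/*
 * Again purely as an illustration, the HARDIRQ expansion yields
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 *	LOCKF_USED_IN_HARDIRQ_READ = (1 << LOCK_USED_IN_HARDIRQ_READ),
 *	...
 *
 * i.e. each LOCKF_* constant is the single-bit mask of the matching
 * lock_usage_bit.
 */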

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
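
/*
 * Typical use of the composite masks (a sketch; the real checks live
 * in the irq-state tracking code in lockdep.c): a class that was ever
 * acquired in hard- or soft-IRQ context satisfies
 *
 *	class->usage_mask & LOCKF_USED_IN_IRQ
 */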

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up
 * problems. So, reduce the static allocations for lockdep-related
 * structures so that everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for lock
 * order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
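
/*
 * For reference: with MAX_LOCKDEP_CHAINS_BITS == 16 this gives 65536
 * chains and 327680 chain-hlock slots; CONFIG_LOCKDEP_SMALL (15 bits)
 * gives 32768 and 163840.
 */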

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
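
/*
 * Usage sketch (the exact characters are produced by get_usage_chars()
 * in lockdep.c; see Documentation/locking/lockdep-design.txt):
 *
 *	char usage[LOCK_USAGE_CHARS];
 *
 *	get_usage_chars(class, usage);
 *	printk("%s\n", usage);
 *
 * which prints something like "-.-.": one column per IRQ state, drawn
 * from {'.', '-', '+', '?'}.
 */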

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
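
/*
 * Caller sketch, in the style of the /proc/lockdep reporting code:
 *
 *	seq_printf(m, " FD:%5lu", lockdep_count_forward_deps(class));
 *
 * With CONFIG_PROVE_LOCKING=n the stubs above keep such callers
 * compiling while reporting zero dependencies.
 */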

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
									\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
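
/*
 * Usage sketch, assuming a field name from struct lockdep_stats:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	...
 *	total = debug_atomic_read(chain_lookup_hits);
 *
 * The inc/dec variants only touch the local CPU's counter (hence the
 * irqs_disabled() assertion), while debug_atomic_read() sums the
 * counter over all possible CPUs.
 */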

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);

	return ops;
}
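
/*
 * Usage sketch: the acquire path can count an operation on a class via
 *
 *	debug_class_ops_inc(hlock_class(hlock));
 *
 * (hlock_class() being the lockdep.c helper that maps a held_lock to
 * its lock_class), and reporting code can sum it back up with
 * debug_class_ops_read(class).  The index is simply the class's
 * position in the static lock_classes[] array.
 */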

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif