/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)          \
        LOCK_USED_IN_##__STATE,         \
        LOCK_USED_IN_##__STATE##_READ,  \
        LOCK_ENABLED_##__STATE,         \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USAGE_STATES
};
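
/*
 * For reference: with lockdep_states.h currently listing HARDIRQ and
 * SOFTIRQ, the x-macro above expands to (illustration only):
 *
 *      LOCK_USED_IN_HARDIRQ,
 *      LOCK_USED_IN_HARDIRQ_READ,
 *      LOCK_ENABLED_HARDIRQ,
 *      LOCK_ENABLED_HARDIRQ_READ,
 *      LOCK_USED_IN_SOFTIRQ,
 *      LOCK_USED_IN_SOFTIRQ_READ,
 *      LOCK_ENABLED_SOFTIRQ,
 *      LOCK_ENABLED_SOFTIRQ_READ,
 *      LOCK_USED,
 *      LOCK_USAGE_STATES
 */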

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
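
/*
 * Decoding sketch for a usage bit number, given the enum layout above:
 * bit 0 selects the _READ variant, bit 1 selects ENABLED over USED_IN,
 * and the remaining bits index the IRQ state. E.g.
 * LOCK_ENABLED_HARDIRQ_READ == 3: read bit and direction bit set,
 * state index 0 (HARDIRQ).
 */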

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)        LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)                                          \
        __LOCKF(USED_IN_##__STATE)                                      \
        __LOCKF(USED_IN_##__STATE##_READ)                               \
        __LOCKF(ENABLED_##__STATE)                                      \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
};

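/*
 * The four constants below are built with the same include trick: each
 * inclusion of lockdep_states.h re-expands LOCKDEP_STATE() to
 * "LOCKF_xxx_##__STATE |", producing an OR of the per-state masks
 * terminated by 0. With the current HARDIRQ and SOFTIRQ states,
 * LOCKF_ENABLED_IRQ, for instance, evaluates to
 * (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0).
 */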
#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP enabled we could exceed this limit and cause
 * system boot-up problems, so reduce the static allocations for the
 * lockdep-related structures until everything fits within the limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES     16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#define STACK_TRACE_HASH_SIZE   8192
#else
#define MAX_LOCKDEP_ENTRIES     32768UL

#define MAX_LOCKDEP_CHAINS_BITS 16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
#define MAX_STACK_TRACE_ENTRIES 524288UL
#define STACK_TRACE_HASH_SIZE   16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT      (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT      (1 << 1)

#define MAX_LOCKDEP_CHAINS      (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
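/* Storage sized for an average of five held locks per lock chain. */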

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
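/*
 * Two usage characters per IRQ state (write and read usage) plus a
 * terminating NUL: five bytes with the current two states.
 */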

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
                                  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
        unsigned long  chain_lookup_hits;
        unsigned int   chain_lookup_misses;
        unsigned long  hardirqs_on_events;
        unsigned long  hardirqs_off_events;
        unsigned long  redundant_hardirqs_on;
        unsigned long  redundant_hardirqs_off;
        unsigned long  softirqs_on_events;
        unsigned long  softirqs_off_events;
        unsigned long  redundant_softirqs_on;
        unsigned long  redundant_softirqs_off;
        int            nr_unused_locks;
        unsigned int   nr_redundant_checks;
        unsigned int   nr_redundant;
        unsigned int   nr_cyclic_checks;
        unsigned int   nr_find_usage_forwards_checks;
        unsigned int   nr_find_usage_backwards_checks;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)                                 \
        this_cpu_inc(lockdep_stats.ptr);

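/*
 * debug_atomic_inc()/dec() use the non-IRQ-safe __this_cpu_*() ops, so
 * they are only correct with interrupts already disabled, which the
 * WARN_ON_ONCE() below asserts. __debug_atomic_inc() above uses the
 * IRQ-safe this_cpu_inc() and may be called from any context.
 */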
#define debug_atomic_inc(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_inc(lockdep_stats.ptr);                      \
}

#define debug_atomic_dec(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_dec(lockdep_stats.ptr);                      \
}

#define debug_atomic_read(ptr)          ({                              \
        struct lockdep_stats *__cpu_lockdep_stats;                      \
        unsigned long long __total = 0;                                 \
        int __cpu;                                                      \
        for_each_possible_cpu(__cpu) {                                  \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
                __total += __cpu_lockdep_stats->ptr;                    \
        }                                                               \
        __total;                                                        \
})
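/*
 * Note: the sum over all possible CPUs is lockless, so the result is
 * only approximate while the counters are being updated; that is fine
 * for the statistics reported via /proc/lockdep_stats.
 */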

static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

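        /* Pointer arithmetic: index of @class in the lock_classes[] array. */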
        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}

#else
# define __debug_atomic_inc(ptr)        do { } while (0)
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
# define debug_class_ops_inc(ptr)       do { } while (0)
#endif