/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)          \
        LOCK_USED_IN_##__STATE,         \
        LOCK_USED_IN_##__STATE##_READ,  \
        LOCK_ENABLED_##__STATE,         \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USED_READ,
        LOCK_USAGE_STATES,
};
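/*
 * With the IRQ states currently listed in lockdep_states.h (HARDIRQ and
 * SOFTIRQ, at the time of writing), the enum above expands to roughly:
 *
 *      LOCK_USED_IN_HARDIRQ = 0, LOCK_USED_IN_HARDIRQ_READ = 1,
 *      LOCK_ENABLED_HARDIRQ = 2, LOCK_ENABLED_HARDIRQ_READ = 3,
 *      LOCK_USED_IN_SOFTIRQ = 4, ..., LOCK_ENABLED_SOFTIRQ_READ = 7,
 *      LOCK_USED = 8, LOCK_USED_READ = 9, LOCK_USAGE_STATES = 10
 */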

/* states after LOCK_USED_READ are not traced and printed */
static_assert(LOCK_TRACE_STATES == LOCK_USAGE_STATES);

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
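/*
 * Worked example, assuming the HARDIRQ-first layout sketched above:
 * for bit == LOCK_ENABLED_HARDIRQ_READ (== 3),
 *
 *      bit & LOCK_USAGE_READ_MASK  -> 1  (a _READ usage)
 *      bit & LOCK_USAGE_DIR_MASK   -> 2  (ENABLED rather than USED_IN)
 *      bit & LOCK_USAGE_STATE_MASK -> 0  (the HARDIRQ group)
 */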

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)        LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)                                          \
        __LOCKF(USED_IN_##__STATE)                                      \
        __LOCKF(USED_IN_##__STATE##_READ)                               \
        __LOCKF(ENABLED_##__STATE)                                      \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
        __LOCKF(USED_READ)
};
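/*
 * Each __LOCKF() entry turns the corresponding usage bit into a one-bit
 * mask; with the HARDIRQ/SOFTIRQ layout assumed above this yields
 * LOCKF_USED_IN_HARDIRQ = 0x01 through LOCKF_ENABLED_SOFTIRQ_READ = 0x80,
 * plus LOCKF_USED = 0x100 and LOCKF_USED_READ = 0x200.
 */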

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE
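/*
 * The include trick above (repeated for the three masks below) makes each
 * LOCKDEP_STATE() entry emit "LOCKF_ENABLED_<state> |"; the trailing 0
 * terminates the OR expression. Assuming HARDIRQ and SOFTIRQ, it expands to:
 *
 *      static const unsigned long LOCKF_ENABLED_IRQ =
 *              LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 */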

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the 32MB limit required for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdep-related structures so that
 * everything fits within the current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES     16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#define STACK_TRACE_HASH_SIZE   8192
#else
#define MAX_LOCKDEP_ENTRIES     (1UL << CONFIG_LOCKDEP_BITS)

#define MAX_LOCKDEP_CHAINS_BITS CONFIG_LOCKDEP_CHAINS_BITS

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES (1UL << CONFIG_LOCKDEP_STACK_TRACE_BITS)
#define STACK_TRACE_HASH_SIZE   (1 << CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS)
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT      (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT      (1 << 1)

#define MAX_LOCKDEP_CHAINS      (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
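/*
 * The *5 factor presumably budgets the shared held-lock pool for an average
 * chain depth of about five locks per chain (an assumption, not spelled out
 * here).
 */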

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);
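/*
 * get_usage_chars() fills 'usage' with two characters per IRQ state plus a
 * terminating NUL; the result is the "{+.+.}"-style string shown in lockdep
 * reports. See Documentation/locking/lockdep-design.rst for what '.', '-',
 * '+' and '?' mean.
 */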

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
                                  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
        unsigned long  chain_lookup_hits;
        unsigned int   chain_lookup_misses;
        unsigned long  hardirqs_on_events;
        unsigned long  hardirqs_off_events;
        unsigned long  redundant_hardirqs_on;
        unsigned long  redundant_hardirqs_off;
        unsigned long  softirqs_on_events;
        unsigned long  softirqs_off_events;
        unsigned long  redundant_softirqs_on;
        unsigned long  redundant_softirqs_off;
        int            nr_unused_locks;
        unsigned int   nr_redundant_checks;
        unsigned int   nr_redundant;
        unsigned int   nr_cyclic_checks;
        unsigned int   nr_find_usage_forwards_checks;
        unsigned int   nr_find_usage_backwards_checks;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)                                 \
        this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_inc(lockdep_stats.ptr);                      \
}

#define debug_atomic_dec(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_dec(lockdep_stats.ptr);                      \
}

#define debug_atomic_read(ptr)          ({                              \
        struct lockdep_stats *__cpu_lockdep_stats;                      \
        unsigned long long __total = 0;                                 \
        int __cpu;                                                      \
        for_each_possible_cpu(__cpu) {                                  \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
                __total += __cpu_lockdep_stats->ptr;                    \
        }                                                               \
        __total;                                                        \
})
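/*
 * debug_atomic_read() is a statement expression that sums the named counter
 * over all possible CPUs, e.g.:
 *
 *      unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 */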

static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

        /* index of this class within the static lock_classes[] array */
        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        /* sum this class's per-cpu operation counts over all possible CPUs */
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}

#else
# define __debug_atomic_inc(ptr)        do { } while (0)
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
# define debug_class_ops_inc(ptr)       do { } while (0)
#endif