/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
        u16                             distance;
        /* bitmap of different dependencies from head to this */
        u8                              dep;
        /* used by BFS to record whether "prev -> this" only has -(*R)-> */
        u8                              only_xr;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list                *parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in add_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
#define MAX_LOCKDEP_KEYS                (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY               -1

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        /*
         * class_idx is zero-indexed; it points to the element in
         * lock_classes this held lock instance belongs to. class_idx is in
         * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
         */
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS  16
#define LOCKDEP_OFF             (1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()                                   \
do {                                                    \
        current->lockdep_recursion += LOCKDEP_OFF;      \
} while (0)

#define lockdep_on()                                    \
do {                                                    \
        current->lockdep_recursion -= LOCKDEP_OFF;      \
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
        struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
        lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass, u8 inner)
{
        lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass)
{
        lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)                            \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)             \
        lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)          \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)                                 \
        lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
                               (lock)->dep_map.wait_type_inner,         \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
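
/*
 * Illustrative sketch (not part of this header): one common way a driver
 * might use lockdep_set_class() to give a lock its own class, so it is not
 * lumped together with every other lock initialized at the same source
 * location. The struct, function and key names below are made up.
 *
 *	struct my_device {
 *		spinlock_t lock;
 *	};
 *
 *	static struct lock_class_key my_device_lock_key;
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &my_device_lock_key);
 *	}
 */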

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
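
/*
 * Illustrative sketch (assumed, not taken from the kernel sources): how a
 * simple locking primitive could forward acquire/release events to lockdep.
 * The my_lock type and functions are hypothetical; real primitives use the
 * spin_acquire()/mutex_acquire()/... wrappers defined further down.
 *
 *	struct my_lock {
 *		arch_spinlock_t raw;
 *		struct lockdep_map dep_map;
 *	};
 *
 *	static void my_lock_lock(struct my_lock *l)
 *	{
 *		// subclass=0, trylock=0, read=0 (exclusive), check=1 (full)
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&l->raw);
 *	}
 *
 *	static void my_lock_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		arch_spin_unlock(&l->raw);
 *	}
 */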

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN      -1
#define LOCK_STATE_NOT_HELD     0
#define LOCK_STATE_HELD         1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)            \
        do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)       \
        do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)          \
        lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)      \
        lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)    \
        lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)     \
        lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)             \
        lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()         \
        lockdep_assert_once(!current->lockdep_depth)
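
/*
 * Illustrative sketch (hypothetical function and struct names): documenting
 * and enforcing a locking rule with lockdep_assert_held(). With lockdep
 * enabled this warns if the caller does not hold the lock; without lockdep
 * it compiles away.
 *
 *	static void my_counter_inc(struct my_device *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->counter++;
 *	}
 */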

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, i)                     do { } while (0)
# define lock_downgrade(l, i)                   do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)         do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather #ifdef the call out themselves.
 */

# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)      (0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)              (1)

#define lockdep_assert(c)                       do { } while (0)
#define lockdep_assert_once(c)                  do { } while (0)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)              do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)            do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)

#define lockdep_recursing(tsk)                  (0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
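
/*
 * Illustrative sketch (names are made up): a statically initialized
 * lockdep_map, typically used together with the lock_map_acquire()/
 * lock_map_release() annotations defined under CONFIG_LOCKDEP below to
 * model a dependency that is not backed by a real lock object.
 *
 *	static struct lock_class_key my_work_key;
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);
 */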

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})
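
/*
 * Illustrative sketch (hypothetical my_mutex helpers): how a sleeping lock
 * could be wired through LOCK_CONTENDED so that lock_contended()/
 * lock_acquired() statistics are recorded only when the fast path (the
 * trylock) fails and the slowpath has to be taken.
 *
 *	static void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 */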

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)                      lock_release(l, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)                                 \
do {                                                                    \
        if (read_lock_is_recursive())                                   \
                lock_acquire_shared_recursive(l, s, t, NULL, i);        \
        else                                                            \
                lock_acquire_shared(l, s, t, NULL, i);                  \
} while (0)

#define rwlock_release(l, i)                    lock_release(l, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)                  lock_release(l, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)                     lock_release(l, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)                     lock_release(l, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, _THIS_IP_)
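
/*
 * Illustrative sketch (made-up names): using a lockdep_map plus
 * lock_map_acquire()/lock_map_release() to teach lockdep about a dependency
 * that has no lock object of its own, e.g. "waiting for a worker to finish".
 * This refers to the hypothetical my_work_map from the sketch after
 * STATIC_LOCKDEP_MAP_INIT() above.
 *
 *	static void my_work_run(void)
 *	{
 *		lock_map_acquire(&my_work_map);
 *		// ... the work itself ...
 *		lock_map_release(&my_work_map);
 *	}
 *
 *	static void my_work_flush(void)
 *	{
 *		// Whoever flushes must not hold locks the work might take.
 *		lock_map_acquire(&my_work_map);
 *		lock_map_release(&my_work_map);
 *		// ... actually wait for the work ...
 *	}
 */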

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_nested(lock, subclass)                              \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
                     _THIS_IP_);                                        \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
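
/*
 * Illustrative sketch (hypothetical caller): might_lock() declares "this
 * function may take this lock", so lockdep records the dependency even on
 * calls where the lock is not actually taken.
 *
 *	static void my_maybe_flush(struct my_device *dev, bool flush)
 *	{
 *		might_lock(&dev->lock);
 *		if (flush) {
 *			spin_lock(&dev->lock);
 *			dev->dirty = false;
 *			spin_unlock(&dev->lock);
 *		}
 *	}
 */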

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled       (debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()                                   \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()                                  \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()                                         \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()                             \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
                      !this_cpu_read(hardirqs_enabled)));               \
} while (0)

#define lockdep_assert_preemption_disabled()                            \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
                      this_cpu_read(hardirqs_enabled)));                \
} while (0)
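
/*
 * Illustrative sketch (hypothetical per-CPU counter): these asserts document
 * a context requirement and only fire when lockdep is enabled.
 *
 *	static void my_stats_add(unsigned long n)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_add(my_stats_counter, n);
 *	}
 */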

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()                                     \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled                  &&              \
                     (!in_softirq() || in_irq() || in_nmi()));          \
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {                       \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          lockdep_hardirq_context() &&                  \
                          !(current->hardirq_threaded || current->irq_config),  \
                          "Not in threaded context on PREEMPT_RT as expected\n");       \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */