/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
        const struct lock_trace         *trace;
        u16                             distance;
        /* bitmap of different dependencies from head to this */
        u8                              dep;
        /* used by BFS to record whether "prev -> this" only has -(*R)-> */
        u8                              only_xr;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list                *parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
        /* see BUILD_BUG_ON()s in add_chain_cache() */
        unsigned int                    irq_context :  2,
                                        depth       :  6,
                                        base        : 24;
        /* 4 byte hole */
        struct hlist_node               entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
#define MAX_LOCKDEP_KEYS                (1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY               -1

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        /*
         * class_idx is zero-indexed; it points to the element in
         * lock_classes this held lock instance belongs to. class_idx is in
         * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
         */
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS  16
#define LOCKDEP_OFF             (1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK  (LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()                                   \
do {                                                    \
        current->lockdep_recursion += LOCKDEP_OFF;      \
} while (0)

#define lockdep_on()                                    \
do {                                                    \
        current->lockdep_recursion -= LOCKDEP_OFF;      \
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
        struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass, short inner)
{
        lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass)
{
        lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)                            \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)             \
        lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,  \
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)          \
        lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
                               (lock)->dep_map.wait_type_inner, \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)                                 \
        lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
                               (lock)->dep_map.wait_type_inner,         \
                               (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

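/*
 * Illustrative usage (a sketch, not part of this header): give a lock
 * embedded in a driver structure its own class so it is not lumped
 * together with every other lock initialized from the same init path.
 * "my_dev", "my_dev_init" and "my_dev_lock_key" are hypothetical names.
 *
 *      static struct lock_class_key my_dev_lock_key;
 *
 *      static void my_dev_init(struct my_dev *dev)
 *      {
 *              spin_lock_init(&dev->lock);
 *              lockdep_set_class(&dev->lock, &my_dev_lock_key);
 *      }
 */
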
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

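/*
 * Illustrative usage (sketch): check that a lock still carries the class
 * key assigned to it; "dev" and "my_dev_lock_key" are the hypothetical
 * names from the example above.
 *
 *      WARN_ON(!lockdep_match_class(&dev->lock, &my_dev_lock_key));
 */
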
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

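/*
 * Illustrative usage (sketch): annotate a custom synchronization
 * primitive so lockdep tracks it like a lock. read=0 and check=1
 * request an exclusive acquire with full validation, per the table
 * above; "sem" and its embedded dep_map are hypothetical.
 *
 *      lock_acquire(&sem->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *      ... critical section ...
 *      lock_release(&sem->dep_map, _RET_IP_);
 */
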
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
        return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)           lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)   lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_write(l)    do {                    \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));    \
        } while (0)

#define lockdep_assert_held_read(l)     do {                            \
                WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)

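/*
 * Illustrative usage (sketch): document a locking precondition as a
 * checked assertion instead of a comment; "update_stats" and "my_dev"
 * are hypothetical.
 *
 *      static void update_stats(struct my_dev *dev)
 *      {
 *              lockdep_assert_held(&dev->lock);
 *              dev->stats.count++;
 *      }
 */
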
#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)     lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
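
/*
 * Illustrative usage (sketch): pinning asserts that a held lock is not
 * released until the matching unpin; the cookie ties pin/unpin pairs
 * together. This mirrors how the scheduler pins the runqueue lock;
 * "rq" is hypothetical here.
 *
 *      struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *      ... code that must not drop rq->lock ...
 *      lockdep_unpin_lock(&rq->lock, cookie);
 */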

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, i)                     do { } while (0)
# define lock_downgrade(l, i)                   do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)         do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)      (0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)              (1)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)    do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)             do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)                     ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)                do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)                do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
        XHLOCK_HARD,
        XHLOCK_SOFT,
        XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }

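/*
 * Illustrative usage (sketch): a static map for an object that is not a
 * lock but whose acquire/release ordering should still be validated
 * (e.g. a work item or completion). "my_work_key" and "my_work_map" are
 * hypothetical names; lock_map_acquire()/lock_map_release() further
 * down operate on such a map.
 *
 *      static struct lock_class_key my_work_key;
 *      static struct lockdep_map my_work_map =
 *              STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);
 */
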
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)                 \
({                                                              \
        int ____err = 0;                                        \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                ____err = lock(_lock);                          \
        }                                                       \
        if (!____err)                                           \
                lock_acquired(&(_lock)->dep_map, _RET_IP_);     \
        ____err;                                                \
})

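/*
 * Illustrative usage (sketch): a lock implementation records contention
 * by first attempting a fast-path trylock and only then falling back to
 * the slow path, much as down_read() combines __down_read_trylock()
 * with __down_read(). The names "my_lock", "__my_trylock" and
 * "__my_lock_slowpath" are hypothetical.
 *
 *      void my_lock_acquire(struct my_lock *l)
 *      {
 *              LOCK_CONTENDED(l, __my_trylock, __my_lock_slowpath);
 *      }
 */
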
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1

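/*
 * Illustrative usage (sketch): when two locks of the same class must
 * nest (e.g. locking both ends of a transfer), the inner acquisition
 * passes a distinct subclass so lockdep does not flag a false
 * self-deadlock; "src" and "dst" are hypothetical.
 *
 *      mutex_lock(&src->mtx);
 *      mutex_lock_nested(&dst->mtx, SINGLE_DEPTH_NESTING);
 */
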
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)                      lock_release(l, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)                                 \
do {                                                                    \
        if (read_lock_is_recursive())                                   \
                lock_acquire_shared_recursive(l, s, t, NULL, i);        \
        else                                                            \
                lock_acquire_shared(l, s, t, NULL, i);                  \
} while (0)

#define rwlock_release(l, i)                    lock_release(l, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)                  lock_release(l, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)                     lock_release(l, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)                     lock_release(l, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, _THIS_IP_)

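/*
 * Illustrative usage (sketch): with "my_work_map" from the
 * STATIC_LOCKDEP_MAP_INIT example above, bracketing the execution of
 * the (hypothetical) work function lets lockdep validate it against
 * real locks, the way the workqueue code validates work items against
 * flush operations.
 *
 *      lock_map_acquire(&my_work_map);
 *      my_work_fn();
 *      lock_map_release(&my_work_map);
 */
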
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)
# define might_lock_nested(lock, subclass)                              \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
                     _THIS_IP_);                                        \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
} while (0)

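/*
 * Illustrative usage (sketch): a function that only takes a lock on a
 * rare slow path can still declare the dependency up front, so lockdep
 * sees it on every call; "dev" and "need_slow_path" are hypothetical.
 *
 *      might_lock(&dev->lock);
 *      if (unlikely(need_slow_path))
 *              mutex_lock(&dev->lock);
 */
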
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled       (debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()                                   \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()                                  \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()                                         \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()                             \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
                      !this_cpu_read(hardirqs_enabled)));               \
} while (0)

#define lockdep_assert_preemption_disabled()                            \
do {                                                                    \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
                      this_cpu_read(hardirqs_enabled)));                \
} while (0)

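/*
 * Illustrative usage (sketch): code that manipulates per-CPU state can
 * assert its context requirement instead of relying on callers to
 * remember it; "my_counter" is a hypothetical per-CPU variable.
 *
 *      lockdep_assert_preemption_disabled();
 *      __this_cpu_inc(my_counter);
 */
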
/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()                                     \
do {                                                                    \
        WARN_ON_ONCE(__lockdep_enabled                  &&              \
                     (!in_softirq() || in_irq() || in_nmi()));          \
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {                       \
                WARN_ONCE(debug_locks && !current->lockdep_recursion && \
                          lockdep_hardirq_context() &&                  \
                          !(current->hardirq_threaded || current->irq_config),  \
                          "Not in threaded context on PREEMPT_RT as expected\n");       \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */