/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <asm/percpu.h>
extern int prove_locking;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */
	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);
/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */
#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to the validator:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, short inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
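
/*
 * Usage sketch (illustrative only, not part of this header): a driver that
 * wants its per-device list lock in a class of its own rather than the one
 * spin_lock_init() set up. The "mydev" names below are made up:
 *
 *	static struct lock_class_key mydev_list_lock_key;
 *
 *	spin_lock_init(&mydev->list_lock);
 *	lockdep_set_class(&mydev->list_lock, &mydev_list_lock_key);
 */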
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
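
/*
 * Annotation sketch (illustrative, with made-up names): a home-grown locking
 * primitive that embeds a dep_map would call these around its own fast/slow
 * paths; the real primitives do it via the *_acquire()/*_release() wrapper
 * macros near the end of this header:
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_real_locking(l);
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		do_the_real_unlocking(l);
 *	}
 */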
260 * Same "read" as for lock_acquire(), except -1 means any.
262 extern int lock_is_held_type(const struct lockdep_map *lock, int read);
264 static inline int lock_is_held(const struct lockdep_map *lock)
266 return lock_is_held_type(lock, -1);
269 #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
270 #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
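
/*
 * Pinning sketch (illustrative): code that hands a held lock to a callee
 * that must not drop it can pin it and verify the cookie afterwards,
 * typically via the lockdep_pin_lock()/lockdep_unpin_lock() wrappers
 * defined below:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	... call code that may use but must not release rq->lock ...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * Releasing a pinned lock triggers a lockdep warning.
 */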
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
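
/*
 * Assertion sketch (illustrative, made-up names): a helper that requires its
 * caller to hold a lock can document and enforce that in one line:
 *
 *	static void my_dev_kick_queue(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->queue_lock);
 *		list_move_tail(&dev->pending, &dev->queue);
 *	}
 *
 * With lockdep enabled this warns if the caller forgot to take
 * dev->queue_lock; with CONFIG_LOCKDEP=n it compiles away (see the stubs
 * below).
 */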
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task) { }

static inline void lockdep_off(void) { }

static inline void lockdep_on(void) { }

static inline void lockdep_set_selftest_task(struct task_struct *task) { }
# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
static inline void lockdep_register_key(struct lock_class_key *key) { }

static inline void lockdep_unregister_key(struct lock_class_key *key) { }

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
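
/*
 * Sketch of how a sleeping-lock slow path might use LOCK_CONTENDED() so that
 * CONFIG_LOCK_STAT can attribute contention (illustrative; "__my_trylock" and
 * "__my_lock_slowpath" are made-up helpers operating on a structure that
 * embeds a dep_map):
 *
 *	void my_lock(struct my_lock *lock)
 *	{
 *		lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		LOCK_CONTENDED(lock, __my_trylock, __my_lock_slowpath);
 *	}
 *
 * i.e. the trylock fast path runs first, and only when it fails is the
 * contention window recorded around the slow path.
 */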
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr) { }
#endif
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
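
/*
 * Typical use (illustrative): taking two locks of the same class in a fixed
 * parent -> child order, e.g. via mutex_lock_nested():
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *
 * Without the nested annotation lockdep would report the second acquisition
 * as possible recursive locking of the same class.
 */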
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
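
/*
 * The lock_map_*() helpers annotate "pseudo locks": code regions that behave
 * like a lock for dependency purposes without being one (workqueue flushing
 * is the classic user). Illustrative sketch with made-up names, using the
 * STATIC_LOCKDEP_MAP_INIT() initializer from earlier in this header:
 *
 *	static struct lock_class_key my_work_key;
 *	static struct lockdep_map my_work_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);
 *
 *	lock_map_acquire(&my_work_map);
 *	... the region that must obey lock ordering like a lock would ...
 *	lock_map_release(&my_work_map);
 */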
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
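
/*
 * might_lock() sketch (illustrative, made-up names): a function that only
 * takes a lock on a rare path can still teach lockdep the dependency on
 * every call, so ordering bugs show up without ever hitting the rare path:
 *
 *	void *my_pool_alloc(struct my_pool *pool)
 *	{
 *		might_lock(&pool->refill_mutex);  (taken only when the pool is empty)
 *		...
 *	}
 */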
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     debug_locks			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif
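
/*
 * Context-assertion sketch (illustrative, made-up names): a helper that
 * touches per-CPU state can document its requirement instead of open-coding
 * a check, and IRQ-sensitive paths can do the same with the *_irqs_*()
 * variants:
 *
 *	static void my_flush_local_queue(void)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_write(my_queue_len, 0);
 *	}
 *
 * All of these compile to no-ops when CONFIG_PROVE_LOCKING is not set.
 */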
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s) { }
#endif

#endif /* __LINUX_LOCKDEP_H */