/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/ww_mutex.h>

#include <asm/processor.h>
/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000
/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKTYPE_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
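/*
 * Minimal usage sketch (illustrative only; the structure and field names
 * below are made up). The writer is assumed to be serialized by some
 * external means and to run non-preemptibly, as required above:
 *
 *	struct example {
 *		seqcount_t	seq;
 *		int		a, b;
 *	};
 *
 *	// writer (externally serialized, non-preemptible)
 *	write_seqcount_begin(&e->seq);
 *	e->a = new_a;
 *	e->b = new_b;
 *	write_seqcount_end(&e->seq);
 *
 *	// reader (lockless, retries while a writer is active)
 *	do {
 *		seq = read_seqcount_begin(&e->seq);
 *		a = e->a; b = e->b;
 *	} while (read_seqcount_retry(&e->seq, seq));
 */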
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                   struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname) \
                .dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)                                       \
        do {                                                    \
                static struct lock_class_key __key;             \
                __seqcount_init((s), #s, &__key);               \
        } while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif
/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
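/*
 * Example (illustrative; "foo" and "foo_seq" are made-up names):
 *
 *	// static initialization
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	// or, for a counter embedded in a dynamically allocated object
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	seqcount_init(&f->seq);
 */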
/*
 * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */
#ifdef CONFIG_LOCKDEP
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif

#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) {          \
        .seqcount = SEQCNT_ZERO(seq_name.seqcount),             \
        __SEQ_LOCK(.lock = (assoc_lock))                        \
}

#define seqcount_locktype_init(s, assoc_lock)                   \
do {                                                            \
        seqcount_init(&(s)->seqcount);                          \
        __SEQ_LOCK((s)->lock = (assoc_lock));                   \
} while (0)
/**
 * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t
 * @name: Name of the seqcount_spinlock_t instance
 * @lock: Pointer to the associated spinlock
 */
#define SEQCNT_SPINLOCK_ZERO(name, lock)	\
	SEQCOUNT_LOCKTYPE_ZERO(name, lock)

/**
 * seqcount_spinlock_init - runtime initializer for seqcount_spinlock_t
 * @s: Pointer to the seqcount_spinlock_t instance
 * @lock: Pointer to the associated spinlock
 */
#define seqcount_spinlock_init(s, lock)		\
	seqcount_locktype_init(s, lock)
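/*
 * Example (illustrative; all names below are made up). The spinlock that
 * serializes writers is registered with the counter, so lockdep can later
 * verify it is actually held when the write section is entered:
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	spin_lock_init(&f->lock);
 *	seqcount_spinlock_init(&f->seq, &f->lock);
 *
 *	// writer
 *	spin_lock(&f->lock);
 *	write_seqcount_begin(&f->seq);
 *	...
 *	write_seqcount_end(&f->seq);
 *	spin_unlock(&f->lock);
 */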
/**
 * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t
 * @name: Name of the seqcount_raw_spinlock_t instance
 * @lock: Pointer to the associated raw_spinlock
 */
#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	\
	SEQCOUNT_LOCKTYPE_ZERO(name, lock)

/**
 * seqcount_raw_spinlock_init - runtime initializer for seqcount_raw_spinlock_t
 * @s: Pointer to the seqcount_raw_spinlock_t instance
 * @lock: Pointer to the associated raw_spinlock
 */
#define seqcount_raw_spinlock_init(s, lock)	\
	seqcount_locktype_init(s, lock)

/**
 * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t
 * @name: Name of the seqcount_rwlock_t instance
 * @lock: Pointer to the associated rwlock
 */
#define SEQCNT_RWLOCK_ZERO(name, lock)		\
	SEQCOUNT_LOCKTYPE_ZERO(name, lock)

/**
 * seqcount_rwlock_init - runtime initializer for seqcount_rwlock_t
 * @s: Pointer to the seqcount_rwlock_t instance
 * @lock: Pointer to the associated rwlock
 */
#define seqcount_rwlock_init(s, lock)		\
	seqcount_locktype_init(s, lock)

/**
 * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t
 * @name: Name of the seqcount_mutex_t instance
 * @lock: Pointer to the associated mutex
 */
#define SEQCNT_MUTEX_ZERO(name, lock)		\
	SEQCOUNT_LOCKTYPE_ZERO(name, lock)

/**
 * seqcount_mutex_init - runtime initializer for seqcount_mutex_t
 * @s: Pointer to the seqcount_mutex_t instance
 * @lock: Pointer to the associated mutex
 */
#define seqcount_mutex_init(s, lock)		\
	seqcount_locktype_init(s, lock)

/**
 * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t
 * @name: Name of the seqcount_ww_mutex_t instance
 * @lock: Pointer to the associated ww_mutex
 */
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	\
	SEQCOUNT_LOCKTYPE_ZERO(name, lock)

/**
 * seqcount_ww_mutex_init - runtime initializer for seqcount_ww_mutex_t
 * @s: Pointer to the seqcount_ww_mutex_t instance
 * @lock: Pointer to the associated ww_mutex
 */
#define seqcount_ww_mutex_init(s, lock)		\
	seqcount_locktype_init(s, lock)
/**
 * typedef seqcount_LOCKNAME_t - sequence counter with an associated lock
 * @seqcount: The real sequence counter
 * @lock: Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by the
 * associated lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 */
/*
 * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
 * @locktype:		actual typename
 * @lockname:		name of the generated seqcount_LOCKNAME_t variant
 * @preemptible:	preemptibility of above locktype
 * @lockmember:		argument for lockdep_assert_held()
 */
#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember)	\
typedef struct seqcount_##lockname {					\
	seqcount_t		seqcount;				\
	__SEQ_LOCK(locktype	*lock);					\
} seqcount_##lockname##_t;						\
									\
static __always_inline seqcount_t *					\
__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s)		\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline bool						\
__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s)	\
{									\
	return preemptible;						\
}									\
									\
static __always_inline void						\
__seqcount_##lockname##_assert(seqcount_##lockname##_t *s)		\
{									\
	__SEQ_LOCK(lockdep_assert_held(lockmember));			\
}
/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
{
        return s;
}

static inline bool __seqcount_preemptible(seqcount_t *s)
{
        return false;
}

static inline void __seqcount_assert(seqcount_t *s)
{
        lockdep_assert_preemption_disabled();
}

SEQCOUNT_LOCKTYPE(raw_spinlock_t,	raw_spinlock,	false,	s->lock)
SEQCOUNT_LOCKTYPE(spinlock_t,		spinlock,	false,	s->lock)
SEQCOUNT_LOCKTYPE(rwlock_t,		rwlock,		false,	s->lock)
SEQCOUNT_LOCKTYPE(struct mutex,		mutex,		true,	s->lock)
SEQCOUNT_LOCKTYPE(struct ww_mutex,	ww_mutex,	true,	&s->lock->base)
#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqcount_##prop((void *)(s)),		\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop),			\
	__seqprop_case((s),	ww_mutex,	prop))

#define __to_seqcount_t(s)				__seqprop(s, ptr)
#define __associated_lock_exists_and_is_preemptible(s)	__seqprop(s, preemptible)
#define __assert_write_section_is_protected(s)		__seqprop(s, assert)
/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)				\
	__read_seqcount_t_begin(__to_seqcount_t(s))
static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = READ_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
        return ret;
}
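/*
 * Example (illustrative): open-coding the ordering that read_seqcount_begin()
 * and read_seqcount_retry() would otherwise provide. The smp_rmb() calls are
 * supplied explicitly around the protected loads ("e" is a made-up object):
 *
 *	seq = __read_seqcount_begin(&e->seq);
 *	smp_rmb();	// order the counter load before the data loads
 *	a = e->a;
 *	b = e->b;
 *	smp_rmb();	// order the data loads before the retry check
 *	if (__read_seqcount_retry(&e->seq, seq))
 *		goto retry;
 */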
/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s)				\
	raw_read_seqcount_t_begin(__to_seqcount_t(s))

static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_t_begin(s);

        smp_rmb();
        return ret;
}
/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)					\
	read_seqcount_t_begin(__to_seqcount_t(s))

static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
{
        seqcount_lockdep_reader_access(s);
        return raw_read_seqcount_t_begin(s);
}
/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)					\
	raw_read_seqcount_t(__to_seqcount_t(s))

static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
{
        unsigned ret = READ_ONCE(s->sequence);

        smp_rmb();
        kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
        return ret;
}
/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)					\
	raw_seqcount_t_begin(__to_seqcount_t(s))

static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
{
        /*
         * If the counter is odd, let read_seqcount_retry() fail
         * by decrementing the counter.
         */
        return raw_read_seqcount_t(s) & ~1;
}
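/*
 * Example (illustrative): a hot-path lookup that tolerates reading while a
 * writer is active and simply reports failure instead of spinning; the
 * caller is assumed to have a slower fallback path:
 *
 *	seq = raw_seqcount_begin(&e->seq);
 *	val = e->val;
 *	if (read_seqcount_retry(&e->seq, seq))
 *		return -EAGAIN;		// caller falls back to the slow path
 *	return val;
 */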
/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)				\
	__read_seqcount_t_retry(__to_seqcount_t(s), start)

static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
        kcsan_atomic_next(0);
        return unlikely(READ_ONCE(s->sequence) != start);
}
/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)				\
	read_seqcount_t_retry(__to_seqcount_t(s), start)

static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_t_retry(s, start);
}
/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 */
#define raw_write_seqcount_begin(s)				\
do {								\
	if (__associated_lock_exists_and_is_preemptible(s))	\
		preempt_disable();				\
								\
	raw_write_seqcount_t_begin(__to_seqcount_t(s));		\
} while (0)

static inline void raw_write_seqcount_t_begin(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
}
/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 */
#define raw_write_seqcount_end(s)				\
do {								\
	raw_write_seqcount_t_end(__to_seqcount_t(s));		\
								\
	if (__associated_lock_exists_and_is_preemptible(s))	\
		preempt_enable();				\
} while (0)

static inline void raw_write_seqcount_t_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}
/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	__assert_write_section_is_protected(s);				\
									\
	if (__associated_lock_exists_and_is_preemptible(s))		\
		preempt_disable();					\
									\
	write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass);	\
} while (0)

static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
{
        raw_write_seqcount_t_begin(s);
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
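/*
 * Example (illustrative): two counters of the same lockdep class are always
 * written in a fixed order, so the inner one is annotated with a distinct
 * nesting level to avoid false positive lockdep reports ("outer" and
 * "inner" are made-up objects):
 *
 *	write_seqcount_begin(&outer->seq);
 *	write_seqcount_begin_nested(&inner->seq, SINGLE_DEPTH_NESTING);
 *	...
 *	write_seqcount_end(&inner->seq);
 *	write_seqcount_end(&outer->seq);
 */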
/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * write_seqcount_begin opens a write side critical section of the given
 * seqcount_t.
 *
 * Context: seqcount_t write side critical sections must be serialized and
 * non-preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)					\
do {								\
	__assert_write_section_is_protected(s);			\
								\
	if (__associated_lock_exists_and_is_preemptible(s))	\
		preempt_disable();				\
								\
	write_seqcount_t_begin(__to_seqcount_t(s));		\
} while (0)

static inline void write_seqcount_t_begin(seqcount_t *s)
{
        write_seqcount_t_begin_nested(s, 0);
}
/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * The write section must've been opened with write_seqcount_begin().
 */
#define write_seqcount_end(s)					\
do {								\
	write_seqcount_t_end(__to_seqcount_t(s));		\
								\
	if (__associated_lock_exists_and_is_preemptible(s))	\
		preempt_enable();				\
} while (0)

static inline void write_seqcount_t_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, _RET_IP_);
        raw_write_seqcount_t_end(s);
}
/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in a
 * seq-writer critical section that would ensure readers are aware of ongoing
 * writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	// reader
 *	do {
 *		int s = read_seqcount_begin(&seq);
 *
 *		r1 = READ_ONCE(X); r2 = READ_ONCE(Y);
 *
 *	} while (read_seqcount_retry(&seq, s));
 *	BUG_ON(!r1 && !r2);
 *
 *	// writer
 *	WRITE_ONCE(Y, true);
 *
 *	raw_write_seqcount_barrier(&seq);
 *
 *	WRITE_ONCE(X, false);
 */
#define raw_write_seqcount_barrier(s)				\
	raw_write_seqcount_t_barrier(__to_seqcount_t(s))

static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
        kcsan_nestable_atomic_begin();
        s->sequence++;
        smp_wmb();
        s->sequence++;
        kcsan_nestable_atomic_end();
}
/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)				\
	write_seqcount_t_invalidate(__to_seqcount_t(s))

static inline void write_seqcount_t_invalidate(seqcount_t *s)
{
        smp_wmb();
        kcsan_nestable_atomic_begin();
        s->sequence += 2;
        kcsan_nestable_atomic_end();
}
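/*
 * Example (illustrative): force in-flight lockless readers to retry after
 * tearing down an object they might still be traversing ("remove_entry" is
 * a hypothetical helper, the lock/seq fields are made up):
 *
 *	spin_lock(&e->lock);
 *	remove_entry(e);
 *	write_seqcount_invalidate(&e->seq);
 *	spin_unlock(&e->lock);
 */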
/**
 * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * Use seqcount_t latching to switch between two storage places protected
 * by a sequence counter. Doing so allows having interruptible, preemptible,
 * seqcount_t write side critical sections.
 *
 * Check raw_write_seqcount_latch() for more details and a full reader and
 * writer usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter value must then be
 * checked with read_seqcount_retry().
 */
#define raw_read_seqcount_latch(s)				\
	raw_read_seqcount_t_latch(__to_seqcount_t(s))

static inline int raw_read_seqcount_t_latch(seqcount_t *s)
{
        /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
        int seq = READ_ONCE(s->sequence); /* ^^^ */
        return seq;
}
/**
 * raw_write_seqcount_latch() - redirect readers to even/odd copy
 * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// read_seqcount_retry() includes needed smp_rmb()
 *		} while (read_seqcount_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence; once it resumes, it might
 *	observe the new entry.
 *
 * NOTE:
 *
 *	When data is a dynamic data structure, one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
#define raw_write_seqcount_latch(s)				\
	raw_write_seqcount_t_latch(__to_seqcount_t(s))

static inline void raw_write_seqcount_t_latch(seqcount_t *s)
{
        smp_wmb();	/* prior stores before incrementing "sequence" */
        s->sequence++;
        smp_wmb();	/* increment "sequence" before following stores */
}
/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)				\
	{							\
		.seqcount = SEQCNT_ZERO(lockname),		\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)		\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)					\
	do {							\
		seqcount_init(&(sl)->seqcount);			\
		spin_lock_init(&(sl)->lock);			\
	} while (0)

/**
 * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
	seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
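/*
 * Example (illustrative; "foo_lock" and "struct foo" are made-up names):
 *
 *	// file-scope seqlock
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	// or embedded in a structure and initialized at runtime
 *	struct foo {
 *		seqlock_t	lock;
 *	};
 *	seqlock_init(&f->lock);
 */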
/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = read_seqcount_begin(&sl->seqcount);

        kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
        kcsan_flat_atomic_begin();
        return ret;
}
/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        /*
         * Assume not nested: read_seqretry() may be called multiple times when
         * completing read critical section.
         */
        kcsan_flat_atomic_end();

        return read_seqcount_retry(&sl->seqcount, start);
}
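/*
 * Example (illustrative): a lockless seqlock_t reader loop, in the style of
 * the classic jiffies/xtime readers ("foo_lock" and "shared_state" are
 * made-up names):
 *
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		snapshot = shared_state;	// copy out the protected data
 *	} while (read_seqretry(&foo_lock, seq));
 */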
/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount);
}
/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}
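/*
 * Example (illustrative): the matching writer for the reader loop sketched
 * above; serialization and non-preemptibility come from the embedded
 * spinlock:
 *
 *	write_seqlock(&foo_lock);
 *	shared_state = new_state;
 *	write_sequnlock(&foo_lock);
 */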
/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}
/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_t_begin(&sl->seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_t_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_t_begin(&sl->seqcount);
        return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_t_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}
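/*
 * Example (illustrative): a writer that may race with readers running in
 * hardirq context, so local interrupts are saved and restored around the
 * write section:
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	shared_state = new_state;
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */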
/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}
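/*
 * Example (illustrative): a reader that must not retry, e.g. because it
 * takes a reference on an object found in the protected data, acts as a
 * locking reader instead of a lockless one ("lookup" and "get_object" are
 * hypothetical helpers):
 *
 *	read_seqlock_excl(&foo_lock);
 *	obj = lookup(&shared_state, key);
 *	if (obj)
 *		get_object(obj);
 *	read_sequnlock_excl(&foo_lock);
 */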
/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}
/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq : Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless readers starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))	/* Even */
                *seq = read_seqbegin(lock);
        else			/* Odd */
                read_seqlock_excl(lock);
}
/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}
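/*
 * Template sketch (illustrative, in the spirit of the example in
 * Documentation/locking/seqlock.rst): try a lockless pass first, falling
 * back to a locking reader when the lockless pass keeps getting invalidated
 * ("foo_lock" and "shared_state" are made-up names):
 *
 *	int seq = 0;	// must start even: lockless pass is tried first
 *
 *	do {
 *		read_seqbegin_or_lock(&foo_lock, &seq);
 *		snapshot = shared_state;
 *	} while (need_seqretry(&foo_lock, seq));
 *	done_seqretry(&foo_lock, seq);
 */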
/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
        unsigned long flags = 0;

        if (!(*seq & 1))	/* Even */
                *seq = read_seqbegin(lock);
        else			/* Odd */
                read_seqlock_excl_irqsave(lock, flags);

        return flags;
}
/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
        if (seq & 1)
                read_sequnlock_excl_irqrestore(lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */