/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 */
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000
/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname }
/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)					\
	do {							\
		static struct lock_class_key __key;		\
		__seqcount_init((s), #s, &__key);		\
	} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}
#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif
/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
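
/*
 * Initialization sketch (editor's example; the "foo" names are
 * hypothetical, not kernel code): a seqcount_t can be initialized
 * statically with SEQCNT_ZERO(), or at runtime with seqcount_init()::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	struct foo {
 *		seqcount_t seq;
 *		...
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		seqcount_init(&f->seq);
 *	}
 */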
/**
 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
	return ret;
}
/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t
 *
 * Return: count to be passed to read_seqcount_retry()
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);

	smp_rmb();
	return ret;
}
/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t
 *
 * Return: count to be passed to read_seqcount_retry()
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}
/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
	return ret;
}
/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	/*
	 * If the counter is odd, let read_seqcount_retry() fail
	 * by decrementing the counter.
	 */
	return raw_read_seqcount(s) & ~1;
}
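
/*
 * Worked example (editor's note): if a writer is active and the counter
 * currently reads 7, raw_seqcount_begin() returns 6. The closing
 * read_seqcount_retry() then compares the counter (7, or a later value)
 * against 6 and necessarily requests a retry, so no spin is needed at
 * the start of the section.
 */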
/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}
/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t. If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
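
/*
 * Canonical read path sketch (editor's example; the "foo" data is
 * hypothetical, see also Documentation/locking/seqlock.rst)::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static int foo_x, foo_y;
 *
 *	static void foo_read(int *x, int *y)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			*x = foo_x;
 *			*y = foo_y;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *	}
 */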
/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t
 */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}
/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t
 */
static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}
static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	lockdep_assert_preemption_disabled();
	__write_seqcount_begin_nested(s, subclass);
}
/*
 * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
 *
 * Use for internal seqlock.h code where it's known that preemption is
 * already disabled. For example, seqlock_t write side functions.
 */
static inline void __write_seqcount_begin(seqcount_t *s)
{
	__write_seqcount_begin_nested(s, 0);
}
/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t
 *
 * write_seqcount_begin opens a write side critical section of the given
 * seqcount_t.
 *
 * Context: seqcount_t write side critical sections must be serialized and
 * non-preemptible. If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}
/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t
 *
 * The write section must've been opened with write_seqcount_begin().
 */
static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	raw_write_seqcount_end(s);
}
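
/*
 * Matching write path sketch for the foo_read() example above (editor's
 * example; the "foo" names are hypothetical). The caller must provide
 * the required serialization and non-preemptibility, here via an
 * external spinlock::
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_write(int x, int y)
 *	{
 *		spin_lock(&foo_lock);	// serializes writers, disables preemption
 *		write_seqcount_begin(&foo_seq);
 *		foo_x = x;
 *		foo_y = y;
 *		write_seqcount_end(&foo_seq);
 *		spin_unlock(&foo_lock);
 *	}
 */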
/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither the writes before nor the writes after the barrier are enclosed in a
 * seq-writer critical section that would ensure readers are aware of ongoing
 * writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *		int s;
 *
 *		do {
 *			s = read_seqcount_begin(&seq);
 *			x = X; y = Y;
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}
/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}
/**
 * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
 * @s: Pointer to seqcount_t
 *
 * Use seqcount_t latching to switch between two storage places protected
 * by a sequence counter. Doing so allows having interruptible, preemptible,
 * seqcount_t write side critical sections.
 *
 * Check raw_write_seqcount_latch() for more details and a full reader and
 * writer usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter value must then be
 * checked with read_seqcount_retry().
 */
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	int seq = READ_ONCE(s->sequence); /* ^^^ */
	return seq;
}
/**
 * raw_write_seqcount_latch() - redirect readers to even/odd copy
 * @s: Pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state, the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			// read_seqcount_retry() includes needed smp_rmb()
 *		} while (read_seqcount_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 * The non-requirement for atomic modifications does _NOT_ include
 * the publishing of new entries in the case where data is a dynamic
 * data structure.
 *
 * An iteration might start in data[0] and get suspended long enough
 * to miss an entire modification sequence; once it resumes, it might
 * observe the new entry.
 *
 * NOTE:
 *
 * When data is a dynamic data structure, one should use regular RCU
 * patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
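
/*
 * Editor's note: each smp_wmb()/latch->seq++/smp_wmb() triplet in the
 * latch_modify() example above is exactly what raw_write_seqcount_latch()
 * provides, so the writer can equivalently be written as (same
 * hypothetical latch_struct and modify() helper)::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 */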
/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}
/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)				\
	do {						\
		seqcount_init(&(sl)->seqcount);		\
		spin_lock_init(&(sl)->lock);		\
	} while (0)

/**
 * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
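
/*
 * Initialization sketch (editor's example; hypothetical "foo" names):
 * a seqlock_t can be defined statically, or initialized at runtime::
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	struct foo {
 *		seqlock_t lock;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		seqlock_init(&f->lock);
 *	}
 */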
/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}
/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times when
	 * completing read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}
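
/*
 * Lockless reader sketch for the seqlock_t API (editor's example; the
 * hypothetical foo_lock/foo_counter follow the definitions above)::
 *
 *	static u64 foo_counter;
 *
 *	static u64 foo_get_counter(void)
 *	{
 *		unsigned seq;
 *		u64 val;
 *
 *		do {
 *			seq = read_seqbegin(&foo_lock);
 *			val = foo_counter;
 *		} while (read_seqretry(&foo_lock, seq));
 *
 *		return val;
 *	}
 */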
/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}
/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
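
/*
 * Matching writer sketch (editor's example): unlike the raw seqcount_t
 * write path, no external serialization is needed; the embedded
 * spinlock provides it::
 *
 *	static void foo_inc_counter(void)
 *	{
 *		write_seqlock(&foo_lock);
 *		foo_counter++;
 *		write_sequnlock(&foo_lock);
 *	}
 */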
/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}
/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}
/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	__write_seqcount_begin(&sl->seqcount);
}
/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	__write_seqcount_begin(&sl->seqcount);
	return flags;
}
/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock: Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)
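
/*
 * Usage sketch (editor's example): @flags must be a caller-provided
 * unsigned long, as with spin_lock_irqsave()::
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	foo_counter++;
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */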
/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl: Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl: Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}
/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
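
/*
 * Locking reader sketch (editor's example): useful when the read section
 * must not be retried, e.g. because re-running it would be too expensive::
 *
 *	read_seqlock_excl(&foo_lock);
 *	val = foo_counter;	// stable; writers are locked out
 *	read_sequnlock_excl(&foo_lock);
 */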
/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *                          softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}
/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *                            reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}
/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *                           reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}
/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}
/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *                               locking reader section
 * @lock: Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *                                    locking reader section
 * @sl: Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl(). In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first. If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid seqlock_t lockless readers starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs to
 * be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}
/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}
/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
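
/*
 * Usage sketch for the "lockless or locking" reader API above (editor's
 * example, modelled on in-tree callers such as d_walk(); the "foo" names
 * and do_something() are hypothetical). Note that the caller forces an
 * odd marker before retrying, which is what escalates the next iteration
 * into a locking read::
 *
 *	int seq = 0;
 *
 *	retry:
 *	read_seqbegin_or_lock(&foo_lock, &seq);
 *	do_something(&foo_data);
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	// become a locking reader on retry
 *		goto retry;
 *	}
 *	done_seqretry(&foo_lock, seq);
 */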
/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}
/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock: Pointer to seqlock_t
 * @seq: Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
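
/*
 * Template sketch for the _irqsave variants above, mirroring the
 * read_seqbegin_or_lock() example (editor's example; hypothetical "foo"
 * names and do_something())::
 *
 *	unsigned long flags;
 *	int seq = 0;
 *
 *	retry:
 *	flags = read_seqbegin_or_lock_irqsave(&foo_lock, &seq);
 *	do_something(&foo_data);
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	// take the lock (and disable irqs) on retry
 *		goto retry;
 *	}
 *	done_seqretry_irqrestore(&foo_lock, seq, flags);
 */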
#endif /* __LINUX_SEQLOCK_H */