seqlock: Fold seqcount_LOCKNAME_init() definition
include/linux/seqlock.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_SEQLOCK_H
3 #define __LINUX_SEQLOCK_H
4
5 /*
6  * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
7  * lockless readers (read-only retry loops), and no writer starvation.
8  *
9  * See Documentation/locking/seqlock.rst
10  *
11  * Copyrights:
12  * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
13  * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
14  */
15
16 #include <linux/compiler.h>
17 #include <linux/kcsan-checks.h>
18 #include <linux/lockdep.h>
19 #include <linux/mutex.h>
20 #include <linux/preempt.h>
21 #include <linux/spinlock.h>
22 #include <linux/ww_mutex.h>
23
24 #include <asm/processor.h>
25
26 /*
27  * The seqlock seqcount_t interface does not prescribe a precise sequence of
28  * read begin/retry/end. For readers, typically there is a call to
29  * read_seqcount_begin() and read_seqcount_retry(); however, there are more
30  * esoteric cases which do not follow this pattern.
31  *
32  * As a consequence, we take the following best-effort approach for raw usage
33  * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
34  * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
35  * atomics; if there is a matching read_seqcount_retry() call, no following
36  * memory operations are considered atomic. Usage of the seqlock_t interface
37  * is not affected.
38  */
39 #define KCSAN_SEQLOCK_REGION_MAX 1000
40
41 /*
42  * Sequence counters (seqcount_t)
43  *
44  * This is the raw counting mechanism, without any writer protection.
45  *
46  * Write side critical sections must be serialized and non-preemptible.
47  *
48  * If readers can be invoked from hardirq or softirq contexts,
49  * interrupts or bottom halves must also be respectively disabled before
50  * entering the write section.
51  *
52  * This mechanism can't be used if the protected data contains pointers,
53  * as the writer can invalidate a pointer that a reader is following.
54  *
55  * If the write serialization mechanism is one of the common kernel
56  * locking primitives, use a sequence counter with associated lock
57  * (seqcount_LOCKTYPE_t) instead.
58  *
59  * If it's desired to automatically handle the sequence counter writer
60  * serialization and non-preemptibility requirements, use a sequential
61  * lock (seqlock_t) instead.
62  *
63  * See Documentation/locking/seqlock.rst
64  */
65 typedef struct seqcount {
66         unsigned sequence;
67 #ifdef CONFIG_DEBUG_LOCK_ALLOC
68         struct lockdep_map dep_map;
69 #endif
70 } seqcount_t;
71
72 static inline void __seqcount_init(seqcount_t *s, const char *name,
73                                           struct lock_class_key *key)
74 {
75         /*
76          * Make sure we are not reinitializing a held lock:
77          */
78         lockdep_init_map(&s->dep_map, name, key, 0);
79         s->sequence = 0;
80 }
81
82 #ifdef CONFIG_DEBUG_LOCK_ALLOC
83
84 # define SEQCOUNT_DEP_MAP_INIT(lockname)                                \
85                 .dep_map = { .name = #lockname }
86
87 /**
88  * seqcount_init() - runtime initializer for seqcount_t
89  * @s: Pointer to the seqcount_t instance
90  */
91 # define seqcount_init(s)                                               \
92         do {                                                            \
93                 static struct lock_class_key __key;                     \
94                 __seqcount_init((s), #s, &__key);                       \
95         } while (0)
96
97 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
98 {
99         seqcount_t *l = (seqcount_t *)s;
100         unsigned long flags;
101
102         local_irq_save(flags);
103         seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
104         seqcount_release(&l->dep_map, _RET_IP_);
105         local_irq_restore(flags);
106 }
107
108 #else
109 # define SEQCOUNT_DEP_MAP_INIT(lockname)
110 # define seqcount_init(s) __seqcount_init(s, NULL, NULL)
111 # define seqcount_lockdep_reader_access(x)
112 #endif
113
114 /**
115  * SEQCNT_ZERO() - static initializer for seqcount_t
116  * @name: Name of the seqcount_t instance
117  */
118 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
119
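/*
 * Minimal initialization sketch, with hypothetical "foo" names (not part of
 * this header): a seqcount_t is initialized either statically with
 * SEQCNT_ZERO() or at runtime with seqcount_init()::
 *
 *        static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *        struct foo {
 *                seqcount_t      seq;
 *                u64             value;
 *        };
 *
 *        static void foo_init(struct foo *foo)
 *        {
 *                seqcount_init(&foo->seq);
 *                foo->value = 0;
 *        }
 */
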
120 /*
121  * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
122  *
123  * A sequence counter which associates the lock used for writer
124  * serialization at initialization time. This enables lockdep to validate
125  * that the write side critical section is properly serialized.
126  *
127  * For associated locks which do not implicitly disable preemption,
128  * preemption protection is enforced in the write side function.
129  *
130  * Lockdep is never used in any of the raw write variants.
131  *
132  * See Documentation/locking/seqlock.rst
133  */
134
135 #ifdef CONFIG_LOCKDEP
136 #define __SEQ_LOCK(expr)        expr
137 #else
138 #define __SEQ_LOCK(expr)
139 #endif
140
141 #define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) {                  \
142         .seqcount               = SEQCNT_ZERO(seq_name.seqcount),       \
143         __SEQ_LOCK(.lock        = (assoc_lock))                         \
144 }
145
146 /**
147  * SEQCNT_SPINLOCK_ZERO - static initializer for seqcount_spinlock_t
148  * @name:       Name of the seqcount_spinlock_t instance
149  * @lock:       Pointer to the associated spinlock
150  */
151 #define SEQCNT_SPINLOCK_ZERO(name, lock)                                \
152         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
153
154 /**
155  * SEQCNT_RAW_SPINLOCK_ZERO - static initializer for seqcount_raw_spinlock_t
156  * @name:       Name of the seqcount_raw_spinlock_t instance
157  * @lock:       Pointer to the associated raw_spinlock
158  */
159 #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)                            \
160         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
161
162 /**
163  * SEQCNT_RWLOCK_ZERO - static initializer for seqcount_rwlock_t
164  * @name:       Name of the seqcount_rwlock_t instance
165  * @lock:       Pointer to the associated rwlock
166  */
167 #define SEQCNT_RWLOCK_ZERO(name, lock)                                  \
168         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
169
170 /**
171  * SEQCNT_MUTEX_ZERO - static initializer for seqcount_mutex_t
172  * @name:       Name of the seqcount_mutex_t instance
173  * @lock:       Pointer to the associated mutex
174  */
175 #define SEQCNT_MUTEX_ZERO(name, lock)                                   \
176         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
177
178 /**
179  * SEQCNT_WW_MUTEX_ZERO - static initializer for seqcount_ww_mutex_t
180  * @name:       Name of the seqcount_ww_mutex_t instance
181  * @lock:       Pointer to the associated ww_mutex
182  */
183 #define SEQCNT_WW_MUTEX_ZERO(name, lock)                                \
184         SEQCOUNT_LOCKTYPE_ZERO(name, lock)
185
186 /**
187  * typedef seqcount_LOCKNAME_t - sequence counter with an associated LOCKTYPE
188  * @seqcount:   The real sequence counter
189  * @lock:       Pointer to the associated lock
190  *
191  * A plain sequence counter with external writer serialization provided by
192  * the associated lock. The lock is associated to the sequence counter in the
193  * static initializer or init function. This enables lockdep to validate
194  * that the write side critical section is properly serialized.
195  */
196
197 /**
198  * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
199  * @s:          Pointer to the seqcount_LOCKNAME_t instance
200  * @lock:       Pointer to the associated LOCKTYPE
201  */
202
203 /*
204  * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
205  * @locktype:           actual typename
206  * @lockname:           "LOCKNAME" part of seqcount_LOCKNAME_t
207  * @preemptible:        preemptibility of above locktype
208  * @lockmember:         argument for lockdep_assert_held()
209  */
210 #define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember)  \
211 typedef struct seqcount_##lockname {                                    \
212         seqcount_t              seqcount;                               \
213         __SEQ_LOCK(locktype     *lock);                                 \
214 } seqcount_##lockname##_t;                                              \
215                                                                         \
216 static __always_inline void                                             \
217 seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock)  \
218 {                                                                       \
219         seqcount_init(&s->seqcount);                                    \
220         __SEQ_LOCK(s->lock = lock);                                     \
221 }                                                                       \
222                                                                         \
223 static __always_inline seqcount_t *                                     \
224 __seqcount_##lockname##_ptr(seqcount_##lockname##_t *s)                 \
225 {                                                                       \
226         return &s->seqcount;                                            \
227 }                                                                       \
228                                                                         \
229 static __always_inline bool                                             \
230 __seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s)         \
231 {                                                                       \
232         return preemptible;                                             \
233 }                                                                       \
234                                                                         \
235 static __always_inline void                                             \
236 __seqcount_##lockname##_assert(seqcount_##lockname##_t *s)              \
237 {                                                                       \
238         __SEQ_LOCK(lockdep_assert_held(lockmember));                    \
239 }
240
241 /*
242  * __seqprop() for seqcount_t
243  */
244
245 static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
246 {
247         return s;
248 }
249
250 static inline bool __seqcount_preemptible(seqcount_t *s)
251 {
252         return false;
253 }
254
255 static inline void __seqcount_assert(seqcount_t *s)
256 {
257         lockdep_assert_preemption_disabled();
258 }
259
260 SEQCOUNT_LOCKTYPE(raw_spinlock_t,       raw_spinlock,   false,  s->lock)
261 SEQCOUNT_LOCKTYPE(spinlock_t,           spinlock,       false,  s->lock)
262 SEQCOUNT_LOCKTYPE(rwlock_t,             rwlock,         false,  s->lock)
263 SEQCOUNT_LOCKTYPE(struct mutex,         mutex,          true,   s->lock)
264 SEQCOUNT_LOCKTYPE(struct ww_mutex,      ww_mutex,       true,   &s->lock->base)
265
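/*
 * Association sketch, with hypothetical "foo" names (not part of this
 * header): a seqcount_spinlock_t is tied to the spinlock that serializes
 * its writers, either statically or at runtime, so lockdep can verify the
 * write side serialization::
 *
 *        static DEFINE_SPINLOCK(foo_lock);
 *        static seqcount_spinlock_t foo_seq =
 *                SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
 *
 *        struct foo {
 *                spinlock_t              lock;
 *                seqcount_spinlock_t     seq;
 *        };
 *
 *        static void foo_init(struct foo *foo)
 *        {
 *                spin_lock_init(&foo->lock);
 *                seqcount_spinlock_init(&foo->seq, &foo->lock);
 *        }
 */
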
266 #define __seqprop_case(s, lockname, prop)                               \
267         seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
268
269 #define __seqprop(s, prop) _Generic(*(s),                               \
270         seqcount_t:             __seqcount_##prop((void *)(s)),         \
271         __seqprop_case((s),     raw_spinlock,   prop),                  \
272         __seqprop_case((s),     spinlock,       prop),                  \
273         __seqprop_case((s),     rwlock,         prop),                  \
274         __seqprop_case((s),     mutex,          prop),                  \
275         __seqprop_case((s),     ww_mutex,       prop))
276
277 #define __to_seqcount_t(s)                              __seqprop(s, ptr)
278 #define __associated_lock_exists_and_is_preemptible(s)  __seqprop(s, preemptible)
279 #define __assert_write_section_is_protected(s)          __seqprop(s, assert)
280
281 /**
282  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
283  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
284  *
285  * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
286  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
287  * provided before actually loading any of the variables that are to be
288  * protected in this critical section.
289  *
290  * Use carefully, only in critical code, and comment how the barrier is
291  * provided.
292  *
293  * Return: count to be passed to read_seqcount_retry()
294  */
295 #define __read_seqcount_begin(s)                                        \
296         __read_seqcount_t_begin(__to_seqcount_t(s))
297
298 static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
299 {
300         unsigned ret;
301
302 repeat:
303         ret = READ_ONCE(s->sequence);
304         if (unlikely(ret & 1)) {
305                 cpu_relax();
306                 goto repeat;
307         }
308         kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
309         return ret;
310 }
311
312 /**
313  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
314  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
315  *
316  * Return: count to be passed to read_seqcount_retry()
317  */
318 #define raw_read_seqcount_begin(s)                                      \
319         raw_read_seqcount_t_begin(__to_seqcount_t(s))
320
321 static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
322 {
323         unsigned ret = __read_seqcount_t_begin(s);
324         smp_rmb();
325         return ret;
326 }
327
328 /**
329  * read_seqcount_begin() - begin a seqcount_t read critical section
330  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
331  *
332  * Return: count to be passed to read_seqcount_retry()
333  */
334 #define read_seqcount_begin(s)                                          \
335         read_seqcount_t_begin(__to_seqcount_t(s))
336
337 static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
338 {
339         seqcount_lockdep_reader_access(s);
340         return raw_read_seqcount_t_begin(s);
341 }
342
343 /**
344  * raw_read_seqcount() - read the raw seqcount_t counter value
345  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
346  *
347  * raw_read_seqcount opens a read critical section of the given
348  * seqcount_t, without any lockdep checking, and without checking or
349  * masking the sequence counter LSB. Calling code is responsible for
350  * handling that.
351  *
352  * Return: count to be passed to read_seqcount_retry()
353  */
354 #define raw_read_seqcount(s)                                            \
355         raw_read_seqcount_t(__to_seqcount_t(s))
356
357 static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
358 {
359         unsigned ret = READ_ONCE(s->sequence);
360         smp_rmb();
361         kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
362         return ret;
363 }
364
365 /**
366  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
367  *                        lockdep and w/o counter stabilization
368  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
369  *
370  * raw_seqcount_begin opens a read critical section of the given
371  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
372  * for the count to stabilize. If a writer is active when it begins, it
373  * will fail the read_seqcount_retry() at the end of the read critical
374  * section instead of stabilizing at the beginning of it.
375  *
376  * Use this only in special kernel hot paths where the read section is
377  * small and has a high probability of success through other external
378  * means. It will save a single branching instruction.
379  *
380  * Return: count to be passed to read_seqcount_retry()
381  */
382 #define raw_seqcount_begin(s)                                           \
383         raw_seqcount_t_begin(__to_seqcount_t(s))
384
385 static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
386 {
387         /*
388          * If the counter is odd, let read_seqcount_retry() fail
389          * by decrementing the counter.
390          */
391         return raw_read_seqcount_t(s) & ~1;
392 }
393
394 /**
395  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
396  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
397  * @start: count, from read_seqcount_begin()
398  *
399  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
400  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
401  * provided before actually loading any of the variables that are to be
402  * protected in this critical section.
403  *
404  * Use carefully, only in critical code, and comment how the barrier is
405  * provided.
406  *
407  * Return: true if a read section retry is required, else false
408  */
409 #define __read_seqcount_retry(s, start)                                 \
410         __read_seqcount_t_retry(__to_seqcount_t(s), start)
411
412 static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
413 {
414         kcsan_atomic_next(0);
415         return unlikely(READ_ONCE(s->sequence) != start);
416 }
417
418 /**
419  * read_seqcount_retry() - end a seqcount_t read critical section
420  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
421  * @start: count, from read_seqcount_begin()
422  *
423  * read_seqcount_retry closes the read critical section of given
424  * seqcount_t.  If the critical section was invalid, it must be ignored
425  * (and typically retried).
426  *
427  * Return: true if a read section retry is required, else false
428  */
429 #define read_seqcount_retry(s, start)                                   \
430         read_seqcount_t_retry(__to_seqcount_t(s), start)
431
432 static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
433 {
434         smp_rmb();
435         return __read_seqcount_t_retry(s, start);
436 }
437
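/*
 * Reader sketch, with hypothetical "foo" names (not part of this header):
 * the canonical lockless read loop retries until a consistent, writer-free
 * snapshot of the protected data has been observed::
 *
 *        struct foo {
 *                seqcount_t      seq;
 *                u64             a, b;
 *        };
 *
 *        static u64 foo_read(struct foo *foo)
 *        {
 *                unsigned int seq;
 *                u64 a, b;
 *
 *                do {
 *                        seq = read_seqcount_begin(&foo->seq);
 *                        a = foo->a;
 *                        b = foo->b;
 *                } while (read_seqcount_retry(&foo->seq, seq));
 *
 *                return a + b;
 *        }
 */
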
438 /**
439  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
440  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
441  */
442 #define raw_write_seqcount_begin(s)                                     \
443 do {                                                                    \
444         if (__associated_lock_exists_and_is_preemptible(s))             \
445                 preempt_disable();                                      \
446                                                                         \
447         raw_write_seqcount_t_begin(__to_seqcount_t(s));                 \
448 } while (0)
449
450 static inline void raw_write_seqcount_t_begin(seqcount_t *s)
451 {
452         kcsan_nestable_atomic_begin();
453         s->sequence++;
454         smp_wmb();
455 }
456
457 /**
458  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
459  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
460  */
461 #define raw_write_seqcount_end(s)                                       \
462 do {                                                                    \
463         raw_write_seqcount_t_end(__to_seqcount_t(s));                   \
464                                                                         \
465         if (__associated_lock_exists_and_is_preemptible(s))             \
466                 preempt_enable();                                       \
467 } while (0)
468
469 static inline void raw_write_seqcount_t_end(seqcount_t *s)
470 {
471         smp_wmb();
472         s->sequence++;
473         kcsan_nestable_atomic_end();
474 }
475
476 /**
477  * write_seqcount_begin_nested() - start a seqcount_t write section with
478  *                                 custom lockdep nesting level
479  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
480  * @subclass: lockdep nesting level
481  *
482  * See Documentation/locking/lockdep-design.rst
483  */
484 #define write_seqcount_begin_nested(s, subclass)                        \
485 do {                                                                    \
486         __assert_write_section_is_protected(s);                         \
487                                                                         \
488         if (__associated_lock_exists_and_is_preemptible(s))             \
489                 preempt_disable();                                      \
490                                                                         \
491         write_seqcount_t_begin_nested(__to_seqcount_t(s), subclass);    \
492 } while (0)
493
494 static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
495 {
496         raw_write_seqcount_t_begin(s);
497         seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
498 }
499
500 /**
501  * write_seqcount_begin() - start a seqcount_t write side critical section
502  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
503  *
504  * write_seqcount_begin opens a write side critical section of the given
505  * seqcount_t.
506  *
507  * Context: seqcount_t write side critical sections must be serialized and
508  * non-preemptible. If readers can be invoked from hardirq or softirq
509  * context, interrupts or bottom halves must be respectively disabled.
510  */
511 #define write_seqcount_begin(s)                                         \
512 do {                                                                    \
513         __assert_write_section_is_protected(s);                         \
514                                                                         \
515         if (__associated_lock_exists_and_is_preemptible(s))             \
516                 preempt_disable();                                      \
517                                                                         \
518         write_seqcount_t_begin(__to_seqcount_t(s));                     \
519 } while (0)
520
521 static inline void write_seqcount_t_begin(seqcount_t *s)
522 {
523         write_seqcount_t_begin_nested(s, 0);
524 }
525
526 /**
527  * write_seqcount_end() - end a seqcount_t write side critical section
528  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
529  *
530  * The write section must've been opened with write_seqcount_begin().
531  */
532 #define write_seqcount_end(s)                                           \
533 do {                                                                    \
534         write_seqcount_t_end(__to_seqcount_t(s));                       \
535                                                                         \
536         if (__associated_lock_exists_and_is_preemptible(s))             \
537                 preempt_enable();                                       \
538 } while (0)
539
540 static inline void write_seqcount_t_end(seqcount_t *s)
541 {
542         seqcount_release(&s->dep_map, _RET_IP_);
543         raw_write_seqcount_t_end(s);
544 }
545
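/*
 * Writer sketch, with hypothetical "foo" names (not part of this header):
 * the write side must already be serialized, e.g. by a spinlock, before the
 * seqcount_t write section is entered; on most configurations spin_lock()
 * also provides the required non-preemptibility::
 *
 *        struct foo {
 *                spinlock_t      lock;
 *                seqcount_t      seq;
 *                u64             a, b;
 *        };
 *
 *        static void foo_update(struct foo *foo, u64 a, u64 b)
 *        {
 *                spin_lock(&foo->lock);
 *
 *                write_seqcount_begin(&foo->seq);
 *                foo->a = a;
 *                foo->b = b;
 *                write_seqcount_end(&foo->seq);
 *
 *                spin_unlock(&foo->lock);
 *        }
 *
 * If the serializing lock is known at initialization time, the matching
 * seqcount_LOCKTYPE_t variant above is preferable, as it lets lockdep
 * verify that the lock is actually held.
 */
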
546 /**
547  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
548  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
549  *
550  * This can be used to provide an ordering guarantee instead of the usual
551  * consistency guarantee. It is one wmb cheaper, because it can collapse
552  * the two back-to-back wmb()s.
553  *
554  * Note that writes surrounding the barrier should be declared atomic (e.g.
555  * via WRITE_ONCE): a) to ensure the writes become visible to other threads
556  * atomically, avoiding compiler optimizations; b) to document which writes are
557  * meant to propagate to the reader critical section. This is necessary because
558  * neither the writes before nor those after the barrier are enclosed in a
559  * seq-writer critical section ensuring readers are aware of ongoing writes::
560  *
561  *      seqcount_t seq;
562  *      bool X = true, Y = false;
563  *
564  *      void read(void)
565  *      {
566  *              bool x, y;
567  *
568  *              do {
569  *                      int s = read_seqcount_begin(&seq);
570  *
571  *                      x = X; y = Y;
572  *
573  *              } while (read_seqcount_retry(&seq, s));
574  *
575  *              BUG_ON(!x && !y);
576  *      }
577  *
578  *      void write(void)
579  *      {
580  *              WRITE_ONCE(Y, true);
581  *
582  *              raw_write_seqcount_barrier(&seq);
583  *
584  *              WRITE_ONCE(X, false);
585  *      }
586  */
587 #define raw_write_seqcount_barrier(s)                                   \
588         raw_write_seqcount_t_barrier(__to_seqcount_t(s))
589
590 static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
591 {
592         kcsan_nestable_atomic_begin();
593         s->sequence++;
594         smp_wmb();
595         s->sequence++;
596         kcsan_nestable_atomic_end();
597 }
598
599 /**
600  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
601  *                               side operations
602  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
603  *
604  * After write_seqcount_invalidate, no seqcount_t read side operations
605  * will complete successfully and see data older than this.
606  */
607 #define write_seqcount_invalidate(s)                                    \
608         write_seqcount_t_invalidate(__to_seqcount_t(s))
609
610 static inline void write_seqcount_t_invalidate(seqcount_t *s)
611 {
612         smp_wmb();
613         kcsan_nestable_atomic_begin();
614         s->sequence += 2;
615         kcsan_nestable_atomic_end();
616 }
617
618 /**
619  * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
620  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
621  *
622  * Use seqcount_t latching to switch between two storage places protected
623  * by a sequence counter. Doing so allows having interruptible, preemptible,
624  * seqcount_t write side critical sections.
625  *
626  * Check raw_write_seqcount_latch() for more details and a full reader and
627  * writer usage example.
628  *
629  * Return: sequence counter raw value. Use the lowest bit as an index for
630  * picking which data copy to read. The full counter value must then be
631  * checked with read_seqcount_retry().
632  */
633 #define raw_read_seqcount_latch(s)                                      \
634         raw_read_seqcount_t_latch(__to_seqcount_t(s))
635
636 static inline int raw_read_seqcount_t_latch(seqcount_t *s)
637 {
638         /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
639         int seq = READ_ONCE(s->sequence); /* ^^^ */
640         return seq;
641 }
642
643 /**
644  * raw_write_seqcount_latch() - redirect readers to even/odd copy
645  * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
646  *
647  * The latch technique is a multiversion concurrency control method that allows
648  * queries during non-atomic modifications. If you can guarantee queries never
649  * interrupt the modification -- e.g. the concurrency is strictly between CPUs
650  * -- you most likely do not need this.
651  *
652  * Where the traditional RCU/lockless data structures rely on atomic
653  * modifications to ensure queries observe either the old or the new state, the
654  * latch allows the same for non-atomic updates. The trade-off is doubling the
655  * cost of storage; we have to maintain two copies of the entire data
656  * structure.
657  *
658  * Very simply put: we first modify one copy and then the other. This ensures
659  * there is always one copy in a stable state, ready to give us an answer.
660  *
661  * The basic form is a data structure like::
662  *
663  *      struct latch_struct {
664  *              seqcount_t              seq;
665  *              struct data_struct      data[2];
666  *      };
667  *
668  * Where a modification, which is assumed to be externally serialized, does the
669  * following::
670  *
671  *      void latch_modify(struct latch_struct *latch, ...)
672  *      {
673  *              smp_wmb();      // Ensure that the last data[1] update is visible
674  *              latch->seq++;
675  *              smp_wmb();      // Ensure that the seqcount update is visible
676  *
677  *              modify(latch->data[0], ...);
678  *
679  *              smp_wmb();      // Ensure that the data[0] update is visible
680  *              latch->seq++;
681  *              smp_wmb();      // Ensure that the seqcount update is visible
682  *
683  *              modify(latch->data[1], ...);
684  *      }
685  *
686  * The query will have a form like::
687  *
688  *      struct entry *latch_query(struct latch_struct *latch, ...)
689  *      {
690  *              struct entry *entry;
691  *              unsigned seq, idx;
692  *
693  *              do {
694  *                      seq = raw_read_seqcount_latch(&latch->seq);
695  *
696  *                      idx = seq & 0x01;
697  *                      entry = data_query(latch->data[idx], ...);
698  *
699  *              // read_seqcount_retry() includes needed smp_rmb()
700  *              } while (read_seqcount_retry(&latch->seq, seq));
701  *
702  *              return entry;
703  *      }
704  *
705  * So during the modification, queries are first redirected to data[1]. Then we
706  * modify data[0]. When that is complete, we redirect queries back to data[0]
707  * and we can modify data[1].
708  *
709  * NOTE:
710  *
711  *      The non-requirement for atomic modifications does _NOT_ include
712  *      the publishing of new entries in the case where data is a dynamic
713  *      data structure.
714  *
715  *      An iteration might start in data[0] and get suspended long enough
716  *      to miss an entire modification sequence; once it resumes it might
717  *      observe the new entry.
718  *
719  * NOTE:
720  *
721  *      When data is a dynamic data structure, one should use regular RCU
722  *      patterns to manage the lifetimes of the objects within.
723  */
724 #define raw_write_seqcount_latch(s)                                     \
725         raw_write_seqcount_t_latch(__to_seqcount_t(s))
726
727 static inline void raw_write_seqcount_t_latch(seqcount_t *s)
728 {
729         smp_wmb();      /* prior stores before incrementing "sequence" */
730         s->sequence++;
731         smp_wmb();      /* increment "sequence" before following stores */
732 }
733
734 /*
735  * Sequential locks (seqlock_t)
736  *
737  * Sequence counters with an embedded spinlock for writer serialization
738  * and non-preemptibility.
739  *
740  * For more info, see:
741  *    - Comments on top of seqcount_t
742  *    - Documentation/locking/seqlock.rst
743  */
744 typedef struct {
745         struct seqcount seqcount;
746         spinlock_t lock;
747 } seqlock_t;
748
749 #define __SEQLOCK_UNLOCKED(lockname)                                    \
750         {                                                               \
751                 .seqcount = SEQCNT_ZERO(lockname),                      \
752                 .lock = __SPIN_LOCK_UNLOCKED(lockname)                  \
753         }
754
755 /**
756  * seqlock_init() - dynamic initializer for seqlock_t
757  * @sl: Pointer to the seqlock_t instance
758  */
759 #define seqlock_init(sl)                                                \
760         do {                                                            \
761                 seqcount_init(&(sl)->seqcount);                         \
762                 spin_lock_init(&(sl)->lock);                            \
763         } while (0)
764
765 /**
766  * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t
767  * @sl: Name of the seqlock_t instance
768  */
769 #define DEFINE_SEQLOCK(sl) \
770                 seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
771
772 /**
773  * read_seqbegin() - start a seqlock_t read side critical section
774  * @sl: Pointer to seqlock_t
775  *
776  * Return: count, to be passed to read_seqretry()
777  */
778 static inline unsigned read_seqbegin(const seqlock_t *sl)
779 {
780         unsigned ret = read_seqcount_begin(&sl->seqcount);
781
782         kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
783         kcsan_flat_atomic_begin();
784         return ret;
785 }
786
787 /**
788  * read_seqretry() - end a seqlock_t read side section
789  * @sl: Pointer to seqlock_t
790  * @start: count, from read_seqbegin()
791  *
792  * read_seqretry closes the read side critical section of given seqlock_t.
793  * If the critical section was invalid, it must be ignored (and typically
794  * retried).
795  *
796  * Return: true if a read section retry is required, else false
797  */
798 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
799 {
800         /*
801          * Assume not nested: read_seqretry() may be called multiple times when
802          * completing a read critical section.
803          */
804         kcsan_flat_atomic_end();
805
806         return read_seqcount_retry(&sl->seqcount, start);
807 }
808
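/*
 * seqlock_t reader sketch, with hypothetical "foo" names (not part of this
 * header): lockless readers simply retry until no writer was active while
 * the protected data was being read::
 *
 *        static DEFINE_SEQLOCK(foo_lock);
 *        static u64 foo_value;
 *
 *        static u64 foo_get(void)
 *        {
 *                unsigned int seq;
 *                u64 val;
 *
 *                do {
 *                        seq = read_seqbegin(&foo_lock);
 *                        val = foo_value;
 *                } while (read_seqretry(&foo_lock, seq));
 *
 *                return val;
 *        }
 */
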
809 /**
810  * write_seqlock() - start a seqlock_t write side critical section
811  * @sl: Pointer to seqlock_t
812  *
813  * write_seqlock opens a write side critical section for the given
814  * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
815  * that sequential lock. All seqlock_t write side sections are thus
816  * automatically serialized and non-preemptible.
817  *
818  * Context: if the seqlock_t read section, or other write side critical
819  * sections, can be invoked from hardirq or softirq contexts, use the
820  * _irqsave or _bh variants of this function instead.
821  */
822 static inline void write_seqlock(seqlock_t *sl)
823 {
824         spin_lock(&sl->lock);
825         write_seqcount_t_begin(&sl->seqcount);
826 }
827
828 /**
829  * write_sequnlock() - end a seqlock_t write side critical section
830  * @sl: Pointer to seqlock_t
831  *
832  * write_sequnlock closes the (serialized and non-preemptible) write side
833  * critical section of given seqlock_t.
834  */
835 static inline void write_sequnlock(seqlock_t *sl)
836 {
837         write_seqcount_t_end(&sl->seqcount);
838         spin_unlock(&sl->lock);
839 }
840
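/*
 * seqlock_t writer sketch, continuing the hypothetical foo_lock/foo_value
 * reader example above: the embedded spinlock makes write sections
 * serialized and non-preemptible without further effort from the caller::
 *
 *        static void foo_set(u64 val)
 *        {
 *                write_seqlock(&foo_lock);
 *                foo_value = val;
 *                write_sequnlock(&foo_lock);
 *        }
 */
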
841 /**
842  * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
843  * @sl: Pointer to seqlock_t
844  *
845  * _bh variant of write_seqlock(). Use only if the read side section, or
846  * other write side sections, can be invoked from softirq contexts.
847  */
848 static inline void write_seqlock_bh(seqlock_t *sl)
849 {
850         spin_lock_bh(&sl->lock);
851         write_seqcount_t_begin(&sl->seqcount);
852 }
853
854 /**
855  * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
856  * @sl: Pointer to seqlock_t
857  *
858  * write_sequnlock_bh closes the serialized, non-preemptible, and
859  * softirqs-disabled, seqlock_t write side critical section opened with
860  * write_seqlock_bh().
861  */
862 static inline void write_sequnlock_bh(seqlock_t *sl)
863 {
864         write_seqcount_t_end(&sl->seqcount);
865         spin_unlock_bh(&sl->lock);
866 }
867
868 /**
869  * write_seqlock_irq() - start a non-interruptible seqlock_t write section
870  * @sl: Pointer to seqlock_t
871  *
872  * _irq variant of write_seqlock(). Use only if the read side section, or
873  * other write sections, can be invoked from hardirq contexts.
874  */
875 static inline void write_seqlock_irq(seqlock_t *sl)
876 {
877         spin_lock_irq(&sl->lock);
878         write_seqcount_t_begin(&sl->seqcount);
879 }
880
881 /**
882  * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
883  * @sl: Pointer to seqlock_t
884  *
885  * write_sequnlock_irq closes the serialized and non-interruptible
886  * seqlock_t write side section opened with write_seqlock_irq().
887  */
888 static inline void write_sequnlock_irq(seqlock_t *sl)
889 {
890         write_seqcount_t_end(&sl->seqcount);
891         spin_unlock_irq(&sl->lock);
892 }
893
894 static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
895 {
896         unsigned long flags;
897
898         spin_lock_irqsave(&sl->lock, flags);
899         write_seqcount_t_begin(&sl->seqcount);
900         return flags;
901 }
902
903 /**
904  * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
905  *                           section
906  * @lock:  Pointer to seqlock_t
907  * @flags: Stack-allocated storage for saving caller's local interrupt
908  *         state, to be passed to write_sequnlock_irqrestore().
909  *
910  * _irqsave variant of write_seqlock(). Use it only if the read side
911  * section, or other write sections, can be invoked from hardirq context.
912  */
913 #define write_seqlock_irqsave(lock, flags)                              \
914         do { flags = __write_seqlock_irqsave(lock); } while (0)
915
916 /**
917  * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
918  *                                section
919  * @sl:    Pointer to seqlock_t
920  * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
921  *
922  * write_sequnlock_irqrestore closes the serialized and non-interruptible
923  * seqlock_t write section previously opened with write_seqlock_irqsave().
924  */
925 static inline void
926 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
927 {
928         write_seqcount_t_end(&sl->seqcount);
929         spin_unlock_irqrestore(&sl->lock, flags);
930 }
931
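/*
 * Hardirq-safe writer sketch, continuing the hypothetical foo_lock/foo_value
 * example above: if readers or other writers can run from hardirq context,
 * the _irqsave/_irqrestore pair keeps the write section non-interruptible::
 *
 *        static void foo_set_any_context(u64 val)
 *        {
 *                unsigned long flags;
 *
 *                write_seqlock_irqsave(&foo_lock, flags);
 *                foo_value = val;
 *                write_sequnlock_irqrestore(&foo_lock, flags);
 *        }
 */
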
932 /**
933  * read_seqlock_excl() - begin a seqlock_t locking reader section
934  * @sl: Pointer to seqlock_t
935  *
936  * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
937  * locking reader exclusively locks out *both* other writers *and* other
938  * locking readers, but it does not update the embedded sequence number.
939  *
940  * Locking readers act like a normal spin_lock()/spin_unlock().
941  *
942  * Context: if the seqlock_t write section, *or other read sections*, can
943  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
944  * variant of this function instead.
945  *
946  * The opened read section must be closed with read_sequnlock_excl().
947  */
948 static inline void read_seqlock_excl(seqlock_t *sl)
949 {
950         spin_lock(&sl->lock);
951 }
952
953 /**
954  * read_sequnlock_excl() - end a seqlock_t locking reader critical section
955  * @sl: Pointer to seqlock_t
956  */
957 static inline void read_sequnlock_excl(seqlock_t *sl)
958 {
959         spin_unlock(&sl->lock);
960 }
961
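/*
 * Locking reader sketch, continuing the hypothetical foo_lock/foo_value
 * example above: a locking reader takes the embedded spinlock instead of
 * spinning on the sequence counter, so it never needs to retry, at the cost
 * of excluding writers and other locking readers::
 *
 *        static u64 foo_get_stable(void)
 *        {
 *                u64 val;
 *
 *                read_seqlock_excl(&foo_lock);
 *                val = foo_value;
 *                read_sequnlock_excl(&foo_lock);
 *
 *                return val;
 *        }
 */
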
962 /**
963  * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
964  *                          softirqs disabled
965  * @sl: Pointer to seqlock_t
966  *
967  * _bh variant of read_seqlock_excl(). Use this variant only if the
968  * seqlock_t write side section, *or other read sections*, can be invoked
969  * from softirq contexts.
970  */
971 static inline void read_seqlock_excl_bh(seqlock_t *sl)
972 {
973         spin_lock_bh(&sl->lock);
974 }
975
976 /**
977  * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
978  *                            reader section
979  * @sl: Pointer to seqlock_t
980  */
981 static inline void read_sequnlock_excl_bh(seqlock_t *sl)
982 {
983         spin_unlock_bh(&sl->lock);
984 }
985
986 /**
987  * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
988  *                           reader section
989  * @sl: Pointer to seqlock_t
990  *
991  * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
992  * write side section, *or other read sections*, can be invoked from a
993  * hardirq context.
994  */
995 static inline void read_seqlock_excl_irq(seqlock_t *sl)
996 {
997         spin_lock_irq(&sl->lock);
998 }
999
1000 /**
1001  * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1002  *                             locking reader section
1003  * @sl: Pointer to seqlock_t
1004  */
1005 static inline void read_sequnlock_excl_irq(seqlock_t *sl)
1006 {
1007         spin_unlock_irq(&sl->lock);
1008 }
1009
1010 static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
1011 {
1012         unsigned long flags;
1013
1014         spin_lock_irqsave(&sl->lock, flags);
1015         return flags;
1016 }
1017
1018 /**
1019  * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1020  *                               locking reader section
1021  * @lock:  Pointer to seqlock_t
1022  * @flags: Stack-allocated storage for saving caller's local interrupt
1023  *         state, to be passed to read_sequnlock_excl_irqrestore().
1024  *
1025  * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
1026  * write side section, *or other read sections*, can be invoked from a
1027  * hardirq context.
1028  */
1029 #define read_seqlock_excl_irqsave(lock, flags)                          \
1030         do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
1031
1032 /**
1033  * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1034  *                                    locking reader section
1035  * @sl:    Pointer to seqlock_t
1036  * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
1037  */
1038 static inline void
1039 read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
1040 {
1041         spin_unlock_irqrestore(&sl->lock, flags);
1042 }
1043
1044 /**
1045  * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1046  * @lock: Pointer to seqlock_t
1047  * @seq:  Marker and return parameter. If the passed value is even, the
1048  * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1049  * If the passed value is odd, the reader will become a *locking* reader
1050  * as in read_seqlock_excl().  In the first call to this function, the
1051  * caller *must* initialize and pass an even value to @seq; this way, a
1052  * lockless read can be optimistically tried first.
1053  *
1054  * read_seqbegin_or_lock is an API designed to optimistically try a normal
1055  * lockless seqlock_t read section first.  If an odd counter is found, the
1056  * lockless read trial has failed, and the next read iteration transforms
1057  * itself into a full seqlock_t locking reader.
1058  *
1059  * This is typically used to avoid seqlock_t lockless readers starvation
1060  * (too many retry loops) in the case of a sharp spike in write side
1061  * activity.
1062  *
1063  * Context: if the seqlock_t write section, *or other read sections*, can
1064  * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
1065  * variant of this function instead.
1066  *
1067  * Check Documentation/locking/seqlock.rst for template example code.
1068  *
1069  * Return: the encountered sequence counter value, through the @seq
1070  * parameter, which is overloaded as a return parameter. This returned
1071  * value must be checked with need_seqretry(). If the read section needs to
1072  * be retried, this returned value must also be passed as the @seq
1073  * parameter of the next read_seqbegin_or_lock() iteration.
1074  */
1075 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
1076 {
1077         if (!(*seq & 1))        /* Even */
1078                 *seq = read_seqbegin(lock);
1079         else                    /* Odd */
1080                 read_seqlock_excl(lock);
1081 }
1082
1083 /**
1084  * need_seqretry() - validate seqlock_t "locking or lockless" read section
1085  * @lock: Pointer to seqlock_t
1086  * @seq: sequence count, from read_seqbegin_or_lock()
1087  *
1088  * Return: true if a read section retry is required, false otherwise
1089  */
1090 static inline int need_seqretry(seqlock_t *lock, int seq)
1091 {
1092         return !(seq & 1) && read_seqretry(lock, seq);
1093 }
1094
1095 /**
1096  * done_seqretry() - end seqlock_t "locking or lockless" reader section
1097  * @lock: Pointer to seqlock_t
1098  * @seq: count, from read_seqbegin_or_lock()
1099  *
1100  * done_seqretry finishes the seqlock_t read side critical section started
1101  * with read_seqbegin_or_lock() and validated by need_seqretry().
1102  */
1103 static inline void done_seqretry(seqlock_t *lock, int seq)
1104 {
1105         if (seq & 1)
1106                 read_sequnlock_excl(lock);
1107 }
1108
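/*
 * "Lockless or locking" reader sketch, continuing the hypothetical
 * foo_lock/foo_value example above. One common in-tree pattern escalates to
 * a locking reader by forcing an odd marker before retrying; treat this as
 * an illustration rather than the only valid form (see
 * Documentation/locking/seqlock.rst)::
 *
 *        static u64 foo_get_no_starvation(void)
 *        {
 *                u64 val;
 *                int seq = 0;
 *
 *        again:
 *                read_seqbegin_or_lock(&foo_lock, &seq);
 *
 *                val = foo_value;
 *
 *                if (need_seqretry(&foo_lock, seq)) {
 *                        seq = 1;        // retry as a locking reader
 *                        goto again;
 *                }
 *                done_seqretry(&foo_lock, seq);
 *
 *                return val;
 *        }
 */
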
1109 /**
1110  * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1111  *                                   a non-interruptible locking reader
1112  * @lock: Pointer to seqlock_t
1113  * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
1114  *
1115  * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
1116  * the seqlock_t write section, *or other read sections*, can be invoked
1117  * from hardirq context.
1118  *
1119  * Note: Interrupts will be disabled only for "locking reader" mode.
1120  *
1121  * Return:
1122  *
1123  *   1. The saved local interrupts state in case of a locking reader, to
1124  *      be passed to done_seqretry_irqrestore().
1125  *
1126  *   2. The encountered sequence counter value, returned through @seq
1127  *      overloaded as a return parameter. Check read_seqbegin_or_lock().
1128  */
1129 static inline unsigned long
1130 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
1131 {
1132         unsigned long flags = 0;
1133
1134         if (!(*seq & 1))        /* Even */
1135                 *seq = read_seqbegin(lock);
1136         else                    /* Odd */
1137                 read_seqlock_excl_irqsave(lock, flags);
1138
1139         return flags;
1140 }
1141
1142 /**
1143  * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1144  *                              non-interruptible locking reader section
1145  * @lock:  Pointer to seqlock_t
1146  * @seq:   Count, from read_seqbegin_or_lock_irqsave()
1147  * @flags: Caller's saved local interrupt state in case of a locking
1148  *         reader, also from read_seqbegin_or_lock_irqsave()
1149  *
1150  * This is the _irqrestore variant of done_seqretry(). The read section
1151  * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
1152  * by need_seqretry().
1153  */
1154 static inline void
1155 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
1156 {
1157         if (seq & 1)
1158                 read_sequnlock_excl_irqrestore(lock, flags);
1159 }
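
/*
 * Sketch of the _irqsave form of the pattern above, continuing the
 * hypothetical foo_lock/foo_value example: the saved interrupt state is only
 * meaningful when the locking reader path was actually taken::
 *
 *        static u64 foo_get_any_context(void)
 *        {
 *                unsigned long flags;
 *                u64 val;
 *                int seq = 0;
 *
 *        again:
 *                flags = read_seqbegin_or_lock_irqsave(&foo_lock, &seq);
 *
 *                val = foo_value;
 *
 *                if (need_seqretry(&foo_lock, seq)) {
 *                        seq = 1;        // retry as a locking reader
 *                        goto again;
 *                }
 *                done_seqretry_irqrestore(&foo_lock, seq, flags);
 *
 *                return val;
 *        }
 */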
1160 #endif /* __LINUX_SEQLOCK_H */