/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
 */
#define KCSAN_SEQLOCK_REGION_MAX 1000

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
		.dep_map = { .name = #lockname }

/**
 * seqcount_init() - runtime initializer for seqcount_t
 * @s: Pointer to the seqcount_t instance
 */
# define seqcount_init(s)						\
	do {								\
		static struct lock_class_key __key;			\
		__seqcount_init((s), #s, &__key);			\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

/**
 * SEQCNT_ZERO() - static initializer for seqcount_t
 * @name: Name of the seqcount_t instance
 */
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
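
/*
 * Example: initializing a plain seqcount_t, statically and at runtime.
 * A minimal sketch; "foo" and its members are hypothetical::
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 *	struct foo {
 *		seqcount_t	seq;
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		seqcount_init(&f->seq);
 *	}
 */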

/*
 * Sequence counters with associated locks (seqcount_LOCKNAME_t)
 *
 * A sequence counter which associates the lock used for writer
 * serialization at initialization time. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * For associated locks which do not implicitly disable preemption,
 * preemption protection is enforced in the write side function.
 *
 * Lockdep is never used in any of the raw write variants.
 *
 * See Documentation/locking/seqlock.rst
 */

/*
 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
 * @seqcount:	The real sequence counter
 * @lock:	Pointer to the associated lock
 *
 * A plain sequence counter with external writer synchronization by
 * LOCKNAME @lock. The lock is associated to the sequence counter in the
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
 * LOCKNAME:	raw_spinlock, spinlock, rwlock or mutex
 */

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 */

#define seqcount_LOCKNAME_init(s, _lock, lockname)			\
	do {								\
		seqcount_##lockname##_t *____s = (s);			\
		seqcount_init(&____s->seqcount);			\
		__SEQ_LOCK(____s->lock = (_lock));			\
	} while (0)

#define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
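
/*
 * Example: associating a spinlock with a sequence counter at init time,
 * so lockdep can validate writer serialization. A minimal sketch; "bar"
 * and its members are hypothetical::
 *
 *	struct bar {
 *		spinlock_t		lock;
 *		seqcount_spinlock_t	seq;
 *	};
 *
 *	void bar_init(struct bar *b)
 *	{
 *		spin_lock_init(&b->lock);
 *		seqcount_spinlock_init(&b->seq, &b->lock);
 *	}
 */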

/*
 * SEQCOUNT_LOCKNAME()	- Instantiate seqcount_LOCKNAME_t and helpers
 * seqprop_LOCKNAME_*()	- Property accessors for seqcount_LOCKNAME_t
 *
 * @lockname:		"LOCKNAME" part of seqcount_LOCKNAME_t
 * @locktype:		LOCKNAME canonical C data type
 * @preemptible:	preemptibility of above locktype
 * @lockbase:		prefix for associated lock/unlock
 */
#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
static __always_inline seqcount_t *					\
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)			\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline const seqcount_t *				\
__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s)	\
{									\
	return &s->seqcount;						\
}									\
									\
static __always_inline unsigned						\
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s)	\
{									\
	unsigned seq = smp_load_acquire(&s->seqcount.sequence);		\
									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return seq;						\
									\
	if (preemptible && unlikely(seq & 1)) {				\
		__SEQ_LOCK(lockbase##_lock(s->lock));			\
		__SEQ_LOCK(lockbase##_unlock(s->lock));			\
									\
		/*							\
		 * Re-read the sequence counter since the (possibly	\
		 * preempted) writer made progress.			\
		 */							\
		seq = smp_load_acquire(&s->seqcount.sequence);		\
	}								\
									\
	return seq;							\
}									\
									\
static __always_inline bool						\
__seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s)	\
{									\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))				\
		return preemptible;					\
									\
	/* PREEMPT_RT relies on the above LOCK+UNLOCK */		\
	return false;							\
}									\
									\
static __always_inline void						\
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s)		\
{									\
	__SEQ_LOCK(lockdep_assert_held(s->lock));			\
}

/*
 * __seqprop() for seqcount_t
 */

static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
{
	return s;
}

static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
{
	return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
	return smp_load_acquire(&s->sequence);
}

static inline bool __seqprop_preemptible(const seqcount_t *s)
{
	return false;
}

static inline void __seqprop_assert(const seqcount_t *s)
{
	lockdep_assert_preemption_disabled();
}

#define __SEQ_RT	IS_ENABLED(CONFIG_PREEMPT_RT)

SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t,  false,    raw_spin)
SEQCOUNT_LOCKNAME(spinlock,     spinlock_t,      __SEQ_RT, spin)
SEQCOUNT_LOCKNAME(rwlock,       rwlock_t,        __SEQ_RT, read)
SEQCOUNT_LOCKNAME(mutex,        struct mutex,    true,     mutex)
#undef SEQCOUNT_LOCKNAME

/*
 * SEQCOUNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
 * @name:	Name of the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated LOCKNAME
 */

#define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) {			\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
	__SEQ_LOCK(.lock	= (assoc_lock))				\
}

#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKNAME_ZERO(name, lock)

#define __seqprop_case(s, lockname, prop)				\
	seqcount_##lockname##_t: __seqprop_##lockname##_##prop

#define __seqprop(s, prop) _Generic(*(s),				\
	seqcount_t:		__seqprop_##prop,			\
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
	__seqprop_case((s),	mutex,		prop))

#define seqprop_ptr(s)			__seqprop(s, ptr)(s)
#define seqprop_const_ptr(s)		__seqprop(s, const_ptr)(s)
#define seqprop_sequence(s)		__seqprop(s, sequence)(s)
#define seqprop_preemptible(s)		__seqprop(s, preemptible)(s)
#define seqprop_assert(s)		__seqprop(s, assert)(s)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)					\
({									\
	unsigned __seq;							\
									\
	while ((__seq = seqprop_sequence(s)) & 1)			\
		cpu_relax();						\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s)						\
({									\
	seqcount_lockdep_reader_access(seqprop_const_ptr(s));		\
	raw_read_seqcount_begin(s);					\
})

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s)						\
({									\
	unsigned __seq = seqprop_sequence(s);				\
									\
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);			\
	__seq;								\
})

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s)						\
({									\
	/*								\
	 * If the counter is odd, let read_seqcount_retry() fail	\
	 * by decrementing the counter.					\
	 */								\
	raw_read_seqcount(s) & ~1;					\
})
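
/*
 * Example: a hot-path read which does not spin for a stable counter but
 * simply bails out if a writer was active. A minimal sketch under
 * hypothetical names ("obj", "obj->seq", "obj->data")::
 *
 *	unsigned seq = raw_seqcount_begin(&obj->seq);
 *
 *	val = obj->data;
 *	if (read_seqcount_retry(&obj->seq, seq))
 *		goto fail;	// e.g. fall back to a slower, locked path
 */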

/**
 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 *
 * Return: true if a read section retry is required, else false
 */
#define __read_seqcount_retry(s, start)					\
	do___read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	kcsan_atomic_next(0);
	return unlikely(READ_ONCE(s->sequence) != start);
}

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t.  If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start)					\
	do_read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return do___read_seqcount_retry(s, start);
}
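
/*
 * Example: the canonical lockless read loop. A minimal sketch; struct
 * foo, its seqcount_t member "seq", and fields "a"/"b" are hypothetical::
 *
 *	unsigned seq;
 *	int a, b;
 *
 *	do {
 *		seq = read_seqcount_begin(&f->seq);
 *		a = f->a;
 *		b = f->b;
 *	} while (read_seqcount_retry(&f->seq, seq));
 */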

/**
 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_begin()
 */
#define raw_write_seqcount_begin(s)					\
do {									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_raw_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_raw_write_seqcount_begin(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
}

/**
 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: check write_seqcount_end()
 */
#define raw_write_seqcount_end(s)					\
do {									\
	do_raw_write_seqcount_end(seqprop_ptr(s));			\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_begin_nested() - start a seqcount_t write section with
 *                                 custom lockdep nesting level
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 * @subclass: lockdep nesting level
 *
 * See Documentation/locking/lockdep-design.rst
 * Context: check write_seqcount_begin()
 */
#define write_seqcount_begin_nested(s, subclass)			\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin_nested(seqprop_ptr(s), subclass);	\
} while (0)

static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
	do_raw_write_seqcount_begin(s);
}

/**
 * write_seqcount_begin() - start a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: sequence counter write side sections must be serialized and
 * non-preemptible. Preemption will be automatically disabled if and
 * only if the seqcount write serialization lock is associated, and
 * preemptible.  If readers can be invoked from hardirq or softirq
 * context, interrupts or bottom halves must be respectively disabled.
 */
#define write_seqcount_begin(s)						\
do {									\
	seqprop_assert(s);						\
									\
	if (seqprop_preemptible(s))					\
		preempt_disable();					\
									\
	do_write_seqcount_begin(seqprop_ptr(s));			\
} while (0)

static inline void do_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin_nested(s, 0);
}

/**
 * write_seqcount_end() - end a seqcount_t write side critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Context: Preemption will be automatically re-enabled if and only if
 * the seqcount write serialization lock is associated, and preemptible.
 */
#define write_seqcount_end(s)						\
do {									\
	do_write_seqcount_end(seqprop_ptr(s));				\
									\
	if (seqprop_preemptible(s))					\
		preempt_enable();					\
} while (0)

static inline void do_write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	do_raw_write_seqcount_end(s);
}
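
/*
 * Example: the matching write side. With a plain seqcount_t, writer
 * serialization and preemption protection are the caller's duty; here a
 * hypothetical spinlock "foo_lock" provides both::
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&f->seq);
 *	f->a = new_a;
 *	f->b = new_b;
 *	write_seqcount_end(&f->seq);
 *	spin_unlock(&foo_lock);
 */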

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither writes before nor after the barrier are enclosed in a seq-writer
 * critical section that would ensure readers are aware of ongoing writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *	}
 */
#define raw_write_seqcount_barrier(s)					\
	do_raw_write_seqcount_barrier(seqprop_ptr(s))

static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
	kcsan_nestable_atomic_begin();
	s->sequence++;
	smp_wmb();
	s->sequence++;
	kcsan_nestable_atomic_end();
}

/**
 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
 *                               side operations
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * After write_seqcount_invalidate, no seqcount_t read side operations
 * will complete successfully and see data older than this.
 */
#define write_seqcount_invalidate(s)					\
	do_write_seqcount_invalidate(seqprop_ptr(s))

static inline void do_write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	kcsan_nestable_atomic_begin();
	s->sequence += 2;
	kcsan_nestable_atomic_end();
}

/*
 * Latch sequence counters (seqcount_latch_t)
 *
 * A sequence counter variant where the counter even/odd value is used to
 * switch between two copies of protected data. This allows the read path,
 * typically NMIs, to safely interrupt the write side critical section.
 *
 * As the write sections are fully preemptible, no special handling for
 * PREEMPT_RT is needed.
 */
typedef struct {
	seqcount_t seqcount;
} seqcount_latch_t;

/**
 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
 * @seq_name: Name of the seqcount_latch_t instance
 */
#define SEQCNT_LATCH_ZERO(seq_name) {					\
	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
}

/**
 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
 * @s: Pointer to the seqcount_latch_t instance
 */
#define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)

/**
 * raw_read_seqcount_latch() - pick even/odd latch data copy
 * @s: Pointer to seqcount_latch_t
 *
 * See raw_write_seqcount_latch() for details and a full reader/writer
 * usage example.
 *
 * Return: sequence counter raw value. Use the lowest bit as an index for
 * picking which data copy to read. The full counter must then be checked
 * with raw_read_seqcount_latch_retry().
 */
static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
	/*
	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
	 * Due to the dependent load, a full smp_rmb() is not needed.
	 */
	return READ_ONCE(s->seqcount.sequence);
}

/**
 * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
 * @s:		Pointer to seqcount_latch_t
 * @start:	count, from raw_read_seqcount_latch()
 *
 * Return: true if a read section retry is required, else false
 */
static __always_inline int
raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
	smp_rmb();
	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}

/**
 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
 * @s: Pointer to seqcount_latch_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state,
 * the latch allows the same for non-atomic updates. The trade-off is doubling
 * the cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like::
 *
 *	struct latch_struct {
 *		seqcount_latch_t	seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following::
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	// Ensure that the last data[1] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	// Ensure that the data[0] update is visible
 *		latch->seq.sequence++;
 *		smp_wmb();	// Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like::
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *		// This includes needed smp_rmb()
 *		} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE:
 *
 *	The non-requirement for atomic modifications does _NOT_ include
 *	the publishing of new entries in the case where data is a dynamic
 *	data structure.
 *
 *	An iteration might start in data[0] and get suspended long enough
 *	to miss an entire modification sequence; once it resumes, it might
 *	observe the new entry.
 *
 * NOTE2:
 *
 *	When data is a dynamic data structure, one should use regular RCU
 *	patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->seqcount.sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

#define __SEQLOCK_UNLOCKED(lockname)					\
	{								\
		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
	}

/**
 * seqlock_init() - dynamic initializer for seqlock_t
 * @sl: Pointer to the seqlock_t instance
 */
#define seqlock_init(sl)						\
	do {								\
		spin_lock_init(&(sl)->lock);				\
		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock);	\
	} while (0)

/**
 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
 * @sl: Name of the seqlock_t instance
 */
#define DEFINE_SEQLOCK(sl) \
		seqlock_t sl = __SEQLOCK_UNLOCKED(sl)

/**
 * read_seqbegin() - start a seqlock_t read side critical section
 * @sl: Pointer to seqlock_t
 *
 * Return: count, to be passed to read_seqretry()
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret = read_seqcount_begin(&sl->seqcount);

	kcsan_atomic_next(0);  /* non-raw usage, assume closing read_seqretry() */
	kcsan_flat_atomic_begin();
	return ret;
}

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	/*
	 * Assume not nested: read_seqretry() may be called multiple times when
	 * completing read critical section.
	 */
	kcsan_flat_atomic_end();

	return read_seqcount_retry(&sl->seqcount, start);
}
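
/*
 * Example: a lockless seqlock_t reader. A minimal sketch; "foo_seqlock"
 * and the shared variables are hypothetical::
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin(&foo_seqlock);
 *		a = shared_a;
 *		b = shared_b;
 *	} while (read_seqretry(&foo_seqlock, seq));
 */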

/*
 * For all seqlock_t write side functions, use the internal
 * do_write_seqcount_begin() instead of generic write_seqcount_begin().
 * This way, no redundant lockdep_assert_held() checks are added.
 */

/**
 * write_seqlock() - start a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_seqlock opens a write side critical section for the given
 * seqlock_t.  It also implicitly acquires the spinlock_t embedded inside
 * that sequential lock. All seqlock_t write side sections are thus
 * automatically serialized and non-preemptible.
 *
 * Context: if the seqlock_t read section, or other write side critical
 * sections, can be invoked from hardirq or softirq contexts, use the
 * _irqsave or _bh variants of this function instead.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock() - end a seqlock_t write side critical section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock closes the (serialized and non-preemptible) write side
 * critical section of given seqlock_t.
 */
static inline void write_sequnlock(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock(&sl->lock);
}
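
/*
 * Example: the matching seqlock_t write side. Unlike raw seqcount_t
 * usage, the embedded spinlock already serializes writers. Names are
 * hypothetical, continuing the reader sketch above::
 *
 *	write_seqlock(&foo_seqlock);
 *	shared_a = new_a;
 *	shared_b = new_b;
 *	write_sequnlock(&foo_seqlock);
 */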

/**
 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of write_seqlock(). Use only if the read side section, or
 * other write side sections, can be invoked from softirq contexts.
 */
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_bh closes the serialized, non-preemptible, and
 * softirqs-disabled, seqlock_t write side critical section opened with
 * write_seqlock_bh().
 */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_bh(&sl->lock);
}

/**
 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of write_seqlock(). Use only if the read side section, or
 * other write sections, can be invoked from hardirq contexts.
 */
static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
}

/**
 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
 * @sl: Pointer to seqlock_t
 *
 * write_sequnlock_irq closes the serialized and non-interruptible
 * seqlock_t write side section opened with write_seqlock_irq().
 */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	do_write_seqcount_begin(&sl->seqcount.seqcount);
	return flags;
}

/**
 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
 *                           section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to write_sequnlock_irqrestore().
 *
 * _irqsave variant of write_seqlock(). Use it only if the read side
 * section, or other write sections, can be invoked from hardirq context.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

/**
 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
 *                                section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
 *
 * write_sequnlock_irqrestore closes the serialized and non-interruptible
 * seqlock_t write section previously opened with write_seqlock_irqsave().
 */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	do_write_seqcount_end(&sl->seqcount.seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
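
/*
 * Example: a write section whose readers can run in hardirq context.
 * A minimal sketch with hypothetical names::
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_seqlock, flags);
 *	shared_a = new_a;
 *	write_sequnlock_irqrestore(&foo_seqlock, flags);
 */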

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl:	Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
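
/*
 * Example: a locking reader, useful when the reader must not (or cannot
 * cheaply) retry. A minimal sketch with hypothetical names::
 *
 *	read_seqlock_excl(&foo_seqlock);
 *	// stable snapshot: writers and other locking readers are held off
 *	a = shared_a;
 *	read_sequnlock_excl(&foo_seqlock);
 */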

/**
 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
 *			    softirqs disabled
 * @sl: Pointer to seqlock_t
 *
 * _bh variant of read_seqlock_excl(). Use this variant only if the
 * seqlock_t write side section, *or other read sections*, can be invoked
 * from softirq contexts.
 */
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

/**
 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
 *			      reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

/**
 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
 *			     reader section
 * @sl: Pointer to seqlock_t
 *
 * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

/**
 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
 *                             locking reader section
 * @sl: Pointer to seqlock_t
 */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

/**
 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
 *				 locking reader section
 * @lock:  Pointer to seqlock_t
 * @flags: Stack-allocated storage for saving caller's local interrupt
 *         state, to be passed to read_sequnlock_excl_irqrestore().
 *
 * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
 * write side section, *or other read sections*, can be invoked from a
 * hardirq context.
 */
#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

/**
 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
 *				      locking reader section
 * @sl:    Pointer to seqlock_t
 * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
 */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

/**
 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. If the passed value is even, the
 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
 * If the passed value is odd, the reader will become a *locking* reader
 * as in read_seqlock_excl().  In the first call to this function, the
 * caller *must* initialize and pass an even value to @seq; this way, a
 * lockless read can be optimistically tried first.
 *
 * read_seqbegin_or_lock is an API designed to optimistically try a normal
 * lockless seqlock_t read section first.  If an odd counter is found, the
 * lockless read trial has failed, and the next read iteration transforms
 * itself into a full seqlock_t locking reader.
 *
 * This is typically used to avoid lockless seqlock_t reader starvation
 * (too many retry loops) in the case of a sharp spike in write side
 * activity.
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * Check Documentation/locking/seqlock.rst for template example code.
 *
 * Return: the encountered sequence counter value, through the @seq
 * parameter, which is overloaded as a return parameter. This returned
 * value must be checked with need_seqretry(). If the read section needs
 * to be retried, this returned value must also be passed as the @seq
 * parameter of the next read_seqbegin_or_lock() iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
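
/*
 * Example: the lockless-then-locking reader pattern. The first pass runs
 * locklessly (@seq starts even); if it fails, @seq is forced odd so that
 * the next pass takes the lock. A minimal sketch with hypothetical
 * names; see Documentation/locking/seqlock.rst for the full template::
 *
 *	int seq = 0;
 *
 *again:
 *	read_seqbegin_or_lock(&foo_seqlock, &seq);
 *	a = shared_a;
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;	// become a locking reader
 *		goto again;
 *	}
 *	done_seqretry(&foo_seqlock, seq);
 */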

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq:  Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 *   1. The saved local interrupts state in case of a locking reader, to
 *      be passed to done_seqretry_irqrestore().
 *
 *   2. The encountered sequence counter value, returned through @seq
 *      overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *				non-interruptible locking reader section
 * @lock:  Pointer to seqlock_t
 * @seq:   Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *	   reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
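
/*
 * Example: the same pattern with a hardirq-safe locking pass. The saved
 * interrupt state is only meaningful if the locking path was taken.
 * A minimal sketch with hypothetical names::
 *
 *	unsigned long flags;
 *	int seq = 0;
 *
 *again:
 *	flags = read_seqbegin_or_lock_irqsave(&foo_seqlock, &seq);
 *	a = shared_a;
 *	if (need_seqretry(&foo_seqlock, seq)) {
 *		seq = 1;	// become a locking reader
 *		goto again;
 *	}
 *	done_seqretry_irqrestore(&foo_seqlock, seq, flags);
 */
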
#endif /* __LINUX_SEQLOCK_H */