// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}
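
/*
 * Illustrative setup (a sketch under assumed names, not part of this
 * file): a freshly initialized rcu_sync starts out idle, with reader
 * fastpaths enabled.
 *
 *	static struct rcu_sync my_rss;	// "my_rss" is a hypothetical name
 *
 *	rcu_sync_init(&my_rss);		// gp_state == GP_IDLE, gp_count == 0
 */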

static void rcu_sync_func(struct rcu_head *rhp);

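/*
 * rcu_sync_call() - Queue rcu_sync_func() to run after a grace period.
 *
 * call_rcu_hurry() is used rather than call_rcu() so that the callback
 * is not subject to lazy-callback batching and the state machine can
 * advance as soon as a grace period elapses.
 */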
static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
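
/*
 * For reference, a summary of the state machine driven by rcu_sync_enter(),
 * rcu_sync_exit(), and rcu_sync_func() above:
 *
 *	GP_IDLE   -> GP_ENTER	first rcu_sync_enter()
 *	GP_ENTER  -> GP_PASSED	grace period elapses, gp_count != 0
 *	GP_PASSED -> GP_EXIT	last rcu_sync_exit(), callback queued
 *	GP_EXIT   -> GP_IDLE	grace period elapses, no new writers
 *	GP_EXIT   -> GP_PASSED	grace period elapses, a new writer arrived
 *	GP_EXIT   -> GP_REPLAY	enter/exit pair completed during GP_EXIT
 *	GP_REPLAY -> GP_EXIT	grace period elapses, callback requeued
 */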

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above, this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
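
/*
 * Illustrative reader-side pattern (a sketch under assumed names, not part
 * of this file): readers pair rcu_sync_is_idle() with an RCU read-side
 * critical section to choose a path.
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&my_rss))	// "my_rss" is hypothetical
 *		reader_fastpath();	// hypothetical: no writer active
 *	else
 *		reader_slowpath();	// hypothetical: writer in progress
 *	rcu_read_unlock();
 */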

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);

	spin_lock_irq(&rsp->rss_lock);
	WARN_ON_ONCE(rsp->gp_count == 0);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
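
/*
 * Illustrative writer-side pattern (a sketch under assumed names, not part
 * of this file):
 *
 *	rcu_sync_enter(&my_rss);	// force readers onto the slowpath
 *	do_update();			// hypothetical write-side work
 *	rcu_sync_exit(&my_rss);		// fastpath restored after a GP
 */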

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	WARN_ON_ONCE(rsp->gp_count);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
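
/*
 * Teardown sketch (assumed call site, not part of this file): the dtor may
 * only run once the final rcu_sync_exit() has dropped gp_count to zero; the
 * rcu_barrier() above then waits out any still-pending rcu_sync_func().
 *
 *	rcu_sync_exit(&my_rss);		// "my_rss" is hypothetical
 *	rcu_sync_dtor(&my_rss);		// safe to free the structure now
 */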