// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);

/*
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

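/*
 * Layout of the lock word (see the masks below):
 * - low 16 bits: number of the owning CPU plus one, zero if unlocked
 * - two lock steal counter bits
 * - two bits for the index of the tail waiter's spin_wait node
 * - upper 12 bits: number of the tail waiter's CPU plus one
 */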
#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

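/*
 * Initialize the four per-CPU spin_wait nodes of @cpu. Each node_id
 * carries the CPU number plus one and the node index, encoded in the
 * same format as the tail portion of the lock word.
 */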
void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}

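/*
 * Load the lock word. If facility 49 is installed, the load is
 * preceded by a NIAI (next-instruction-access-intent) hint that is
 * patched in at boot time via ALTERNATIVE.
 */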
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}

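/*
 * Compare-and-swap on the lock word, again preceded by a NIAI hint
 * when facility 49 is available. Returns 1 if @old was still in place
 * and has been replaced by @new.
 */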
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}

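/*
 * Translate the tail portion of a lock word back into a pointer to
 * the spin_wait node of the last queued waiter.
 */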
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

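/*
 * Return the CPU (plus one) a waiter should yield to: the lock owner
 * if the lock is held, otherwise the CPU at the head of the wait
 * queue. Returns 0 if there is no target CPU.
 */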
static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

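/*
 * Queued (MCS-like) slow path: enqueue this CPU's spin_wait node,
 * spin on the node's prev pointer until this CPU reaches the head of
 * the queue, then spin on the lock word itself. The four per-CPU
 * nodes allow for nested slow-path invocations, e.g. when an
 * interrupt handler takes a spinlock while the CPU is already
 * spinning.
 */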
static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = get_lowcore()->spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	while (1) {
		old = READ_ONCE(lp->lock);
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock, if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

 out:
	get_lowcore()->spinlock_index--;
}

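/*
 * Classic slow path: spin directly on the lock word using NIAI hinted
 * accesses and yield to the owning virtual CPU after spin_retry
 * unsuccessful attempts.
 */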
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
				/* Got the lock */
				return;
			}
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

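/*
 * Slow path of arch_spin_lock(): CPUs flagged as dedicated use the
 * queued variant, all others the classic variant.
 */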
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

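/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times
 * without queueing. Returns 1 if the lock was acquired, 0 otherwise.
 */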
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

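/*
 * Slow path of read_lock: readers in interrupt context only wait for
 * the writer bit to clear, all others drop their reader count, queue
 * up on the wait spinlock, re-add themselves and then wait for the
 * writer to finish.
 */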
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

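/*
 * Slow path of write_lock: announce this writer in the waiter part of
 * the count word, queue on the wait spinlock and wait until neither
 * readers nor another writer hold the lock before setting the write
 * bit.
 */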
void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);

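/*
 * Yield to the CPU that currently owns @lp, unless we run in an LPAR
 * and the owning virtual CPU is not preempted.
 */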
void arch_spin_relax(arch_spinlock_t *lp)
{
	int cpu;

	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_spin_relax);