/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#include <linux/limits.h>
#include <asm/march.h>

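/*
 * __atomic_read() - read the 32-bit counter with a single L (load)
 * instruction, so the access cannot be split or optimized away.
 */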
static __always_inline int __atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%[c],%[counter]\n"
		: [c] "=d" (c) : [counter] "R" (v->counter));
	return c;
}

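/*
 * __atomic_set() - store a new 32-bit counter value. Compile-time constants
 * that fit into a signed 16-bit immediate are written directly to storage
 * with MVHI; everything else is stored from a register with ST.
 */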
static __always_inline void __atomic_set(atomic_t *v, int i)
{
	if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
		asm volatile(
			"	mvhi	%[counter], %[i]\n"
			: [counter] "=Q" (v->counter) : [i] "K" (i));
	} else {
		asm volatile(
			"	st	%[i],%[counter]\n"
			: [counter] "=R" (v->counter) : [i] "d" (i));
	}
}

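/*
 * __atomic64_read() - read the 64-bit counter with a single LG (load 64-bit)
 * instruction.
 */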
static __always_inline s64 __atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%[c],%[counter]\n"
		: [c] "=d" (c) : [counter] "RT" (v->counter));
	return c;
}

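/*
 * __atomic64_set() - store a new 64-bit counter value, using MVGHI for
 * signed 16-bit compile-time constants and STG otherwise.
 */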
static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
{
	if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
		asm volatile(
			"	mvghi	%[counter], %[i]\n"
			: [counter] "=Q" (v->counter) : [i] "K" (i));
	} else {
		asm volatile(
			"	stg	%[i],%[counter]\n"
			: [counter] "=RT" (v->counter) : [i] "d" (i));
	}
}

#ifdef MARCH_HAS_Z196_FEATURES

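/*
 * With the z196 interlocked-access instructions (LAA/LAN/LAO/LAX and their
 * 64-bit "g" forms) the update is performed atomically and the previous
 * value is returned in %[old]. The _barrier variants append "bcr 14,0"
 * (fast serialization) so the operation also acts as a full memory barrier.
 */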
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
static __always_inline op_type op_name(op_type val, op_type *ptr)	\
{									\
	op_type old;							\
									\
	asm volatile(							\
		op_string "	%[old],%[val],%[ptr]\n"			\
		op_barrier						\
		: [old] "=d" (old), [ptr] "+QS" (*ptr)			\
		: [val] "d" (val) : "cc", "memory");			\
	return old;							\
}									\

#define __ATOMIC_OPS(op_name, op_type, op_string)			\
	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or,  int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or,  long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

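/*
 * ASI/AGSI add a signed immediate directly to storage; with the z196
 * interlocked-access facility the storage update is atomic. No old value is
 * returned, which is sufficient for callers that only need the side effect.
 */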
#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)	\
static __always_inline void op_name(op_type val, op_type *ptr)		\
{									\
	asm volatile(							\
		op_string "	%[ptr],%[val]\n"			\
		op_barrier						\
		: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string)			\
	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n")		\
	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP

#else /* MARCH_HAS_Z196_FEATURES */

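/*
 * Pre-z196 fallback: emulate the atomic operations with a COMPARE AND SWAP
 * retry loop. The operation is applied to a copy of the old value and CS
 * publishes the result; on a mismatch CS reloads the current value into
 * %[old] and "jl 0b" retries.
 */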
#define __ATOMIC_OP(op_name, op_string)					\
static __always_inline int op_name(int val, int *ptr)			\
{									\
	int old, new;							\
									\
	asm volatile(							\
		"0:	lr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	cs	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC_OPS(op_name, op_string)				\
	__ATOMIC_OP(op_name, op_string)					\
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or,  "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

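/*
 * 64-bit variant of the fallback: the same CS retry loop, built from LGR
 * and CSG (64-bit COMPARE AND SWAP).
 */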
#define __ATOMIC64_OP(op_name, op_string)				\
static __always_inline long op_name(long val, long *ptr)		\
{									\
	long old, new;							\
									\
	asm volatile(							\
		"0:	lgr	%[new],%[old]\n"			\
		op_string "	%[new],%[val]\n"			\
		"	csg	%[old],%[new],%[ptr]\n"			\
		"	jl	0b"					\
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
	return old;							\
}

#define __ATOMIC64_OPS(op_name, op_string)				\
	__ATOMIC64_OP(op_name, op_string)				\
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or,  "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

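/*
 * Without MARCH_HAS_Z196_FEATURES there is no single-instruction
 * add-to-storage, so the _const helpers simply fall back to the CS loop
 * based add; the CS loop already serializes, hence the _barrier variants
 * map to the same helpers and the returned old value is ignored.
 */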
#define __atomic_add_const(val, ptr)		__atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr)	__atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr)		__atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr)	__atomic64_add(val, ptr)

#endif /* MARCH_HAS_Z196_FEATURES */

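/*
 * __atomic_cmpxchg()/__atomic64_cmpxchg() - a single COMPARE AND SWAP.
 * The return value is what was found in storage: equal to the old argument
 * if the swap was performed, the current (conflicting) value otherwise.
 */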
static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

/* GCC versions before 14.2.0 may die with an ICE in some configurations. */
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))

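/*
 * With flag output operands available, "=@cc" hands the condition code of
 * CS/CSG directly to the compiler: cc == 0 means the swap was performed, so
 * no separate comparison of the returned value is needed.
 */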
static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	int cc;

	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;
}

static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	int cc;

	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;
}

#else /* __GCC_ASM_FLAG_OUTPUTS__ */

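/*
 * Fallback without flag output operands: remember the expected old value
 * and compare it with what CS/CSG left in %[old]; the two are equal exactly
 * when the swap was performed.
 */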
static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	int old_expected = old;

	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}

static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	long old_expected = old;

	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}

#endif /* __GCC_ASM_FLAG_OUTPUTS__ */

#endif /* __ARCH_S390_ATOMIC_OPS__ */