Lines Matching +full:1 +full:v

27 static __inline__ int arch_atomic_read(const atomic_t *v)
33 __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
35 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
40 static __inline__ void arch_atomic_set(atomic_t *v, int i)
44 __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
46 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
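
The two asm variants shown for each accessor are alternative code paths; the configuration check that selects between them contains neither matched identifier and is therefore not listed. Semantically, arch_atomic_read() and arch_atomic_set() are plain, relaxed, single-copy-atomic loads and stores. A minimal userspace sketch of that contract, using C11 atomics rather than the kernel's atomic_t (all names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's atomic_t. */
typedef struct { atomic_int counter; } katomic_t;

static inline int katomic_read(katomic_t *v)
{
	/* Same contract as the lwz above: one relaxed atomic load. */
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

static inline void katomic_set(katomic_t *v, int i)
{
	/* Same contract as the stw above: one relaxed atomic store. */
	atomic_store_explicit(&v->counter, i, memory_order_relaxed);
}

int main(void)
{
	katomic_t v = { 0 };

	katomic_set(&v, 42);
	printf("%d\n", katomic_read(&v));
	return 0;
}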
50 static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
55 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
58 " bne- 1b\n" \
59 : "=&r" (t), "+m" (v->counter) \
60 : "r"#sign (a), "r" (&v->counter) \
65 static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
70 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
73 " bne- 1b\n" \
74 : "=&r" (t), "+m" (v->counter) \
75 : "r"#sign (a), "r" (&v->counter) \
82 static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
87 "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
88 #asm_op "%I3" suffix " %1,%0,%3\n" \
89 " stwcx. %1,0,%4\n" \
90 " bne- 1b\n" \
91 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
92 : "r"#sign (a), "r" (&v->counter) \
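
ATOMIC_OP, ATOMIC_OP_RETURN_RELAXED and ATOMIC_FETCH_OP_RELAXED are templates; the ATOMIC_OPS() invocations that instantiate them (for operations such as add, sub, and, or, xor) contain neither matched identifier and so are not listed. Every instantiation reduces to the same lwarx/stwcx. retry loop: load-and-reserve, compute, store-conditional, branch back if the reservation was lost. A hedged userspace sketch of that loop shape for the fetch-add case, with a C11 weak compare-exchange standing in for lwarx/stwcx. (illustrative only, not the kernel code):

#include <stdatomic.h>

/* Models arch_atomic_fetch_add_relaxed(): return the old value, retrying
 * until the update lands, just like the "bne- 1b" loop above. */
static inline int model_fetch_add_relaxed(atomic_int *counter, int a)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	/* compare_exchange_weak may fail spuriously, much as stwcx. may,
	 * so the loop retries with the freshly observed value. */
	while (!atomic_compare_exchange_weak_explicit(counter, &old, old + a,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
	return old;
}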
132 * @v: pointer of type atomic_t
133 * @a: the amount to add to v...
134 * @u: ...unless v is equal to u.
136 * Atomically adds @a to @v, so long as it was not @u.
137 * Returns the old value of @v.
139 static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
145 "1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
149 " stwcx. %0,0,%1 \n\
150 bne- 1b \n"
155 : "r" (&v->counter), "rI" (a), "r" (u)
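
Only the matched lines appear above, so the compare against @u, the early exit, and the entry/exit barriers are missing from the listing. The documented contract is easier to follow in a userspace model: add @a unless the current value equals @u, and return the old value either way. A hedged sketch using a C11 compare-exchange loop (seq_cst stands in loosely for the kernel's full-barrier semantics; this is not the kernel implementation):

#include <stdatomic.h>

static inline int model_fetch_add_unless(atomic_int *counter, int a, int u)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	do {
		if (old == u)
			break;		/* value is @u: leave it untouched */
	} while (!atomic_compare_exchange_weak_explicit(counter, &old, old + a,
							memory_order_seq_cst,
							memory_order_relaxed));
	return old;			/* old value, whether or not we added */
}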
163 * Atomically test *v and decrement if it is greater than 0.
164 * The function returns the old value of *v minus 1, even if
165 * the atomic variable, v, was not decremented.
167 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
173 "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
174 cmpwi %0,1\n\
175 addi %0,%0,-1\n\
177 " stwcx. %0,0,%1\n\
178 bne- 1b"
182 : "r" (&v->counter)
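
Again only the matched lines are shown, so the branch that skips the store when the result would go negative is not listed. Note the documented quirk: the return value is the old value minus 1 even when nothing was stored. A userspace sketch of exactly that contract (illustrative, not the kernel code):

#include <stdatomic.h>

static inline int model_dec_if_positive(atomic_int *counter)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);
	int dec;

	do {
		dec = old - 1;
		if (dec < 0)
			break;		/* would go negative: don't store */
	} while (!atomic_compare_exchange_weak_explicit(counter, &old, dec,
							memory_order_seq_cst,
							memory_order_relaxed));
	return dec;			/* old - 1, even when nothing was stored */
}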
193 static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
199 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
201 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
206 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
210 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
212 __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
216 static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
221 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
224 " bne- 1b\n" \
225 : "=&r" (t), "+m" (v->counter) \
226 : "r" (a), "r" (&v->counter) \
232 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
237 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
240 " bne- 1b\n" \
241 : "=&r" (t), "+m" (v->counter) \
242 : "r" (a), "r" (&v->counter) \
250 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
255 "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
256 #asm_op " %1,%3,%0\n" \
257 " stdcx. %1,0,%4\n" \
258 " bne- 1b\n" \
259 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
260 : "r" (a), "r" (&v->counter) \
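
The 64-bit templates mirror the 32-bit ones, with ldarx/stdcx. in place of lwarx/stwcx.; the ATOMIC64_OPS() invocations that instantiate them are again outside the matched lines. At call sites, the *_relaxed variants fit cases where only the atomicity of the update matters, not its ordering against other memory accesses, e.g. statistics counters. A small, runnable usage-level sketch with C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* A statistics counter that only needs relaxed ordering: readers just
 * want an eventually up-to-date total for reporting. */
static atomic_llong rx_bytes;

static void account_rx(long long n)
{
	atomic_fetch_add_explicit(&rx_bytes, n, memory_order_relaxed);
}

int main(void)
{
	account_rx(1500);
	account_rx(64);
	printf("rx_bytes = %lld\n",
	       atomic_load_explicit(&rx_bytes, memory_order_relaxed));
	return 0;
}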
298 static __inline__ void arch_atomic64_inc(atomic64_t *v)
303 "1: ldarx %0,0,%2 # atomic64_inc\n\
304 addic %0,%0,1\n\
306 bne- 1b"
307 : "=&r" (t), "+m" (v->counter)
308 : "r" (&v->counter)
313 static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
318 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
319 " addic %0,%0,1\n"
321 " bne- 1b"
322 : "=&r" (t), "+m" (v->counter)
323 : "r" (&v->counter)
329 static __inline__ void arch_atomic64_dec(atomic64_t *v)
334 "1: ldarx %0,0,%2 # atomic64_dec\n\
335 addic %0,%0,-1\n\
337 bne- 1b"
338 : "=&r" (t), "+m" (v->counter)
339 : "r" (&v->counter)
344 static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
349 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
350 " addic %0,%0,-1\n"
352 " bne- 1b"
353 : "=&r" (t), "+m" (v->counter)
354 : "r" (&v->counter)
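
inc/dec and their *_return_relaxed variants are open-coded with an addic immediate rather than generated from the templates. A common usage-level pattern for inc_return is handing out unique, monotonically increasing IDs; a hedged sketch with C11 atomics (names are illustrative; fetch_add + 1 models the "return the new value" behaviour):

#include <stdatomic.h>
#include <stdio.h>

static atomic_llong next_id;	/* first ID handed out will be 1 */

static long long alloc_id(void)
{
	/* Models inc_return: increment, then return the new value. */
	return atomic_fetch_add_explicit(&next_id, 1, memory_order_relaxed) + 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("allocated id %lld\n", alloc_id());
	return 0;
}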
364 * Atomically test *v and decrement if it is greater than 0.
365 * The function returns the old value of *v minus 1.
367 static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
373 "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
374 addic. %0,%0,-1\n\
376 stdcx. %0,0,%1\n\
377 bne- 1b"
381 : "r" (&v->counter)
390 * @v: pointer of type atomic64_t
391 * @a: the amount to add to v...
392 * @u: ...unless v is equal to u.
394 * Atomically adds @a to @v, so long as it was not @u.
395 * Returns the old value of @v.
397 static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
403 "1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
407 " stdcx. %0,0,%1 \n\
408 bne- 1b \n"
413 : "r" (&v->counter), "r" (a), "r" (u)
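
On the 32-bit side, the generic atomic fallback layer (outside this file) typically derives add_unless() and inc_not_zero() from arch_atomic_fetch_add_unless(); the 64-bit code below instead supplies its own arch_atomic64_inc_not_zero(). The derivation is simple enough to sketch in userspace C (hedged: this mirrors the generic-fallback shape, it is not this file's code):

#include <stdatomic.h>
#include <stdbool.h>

/* add_unless(): add @a unless the value is @u; true if the add happened. */
static inline bool model_add_unless(atomic_int *counter, int a, int u)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	do {
		if (old == u)
			return false;
	} while (!atomic_compare_exchange_weak_explicit(counter, &old, old + a,
							memory_order_seq_cst,
							memory_order_relaxed));
	return true;
}

/* inc_not_zero() is just add_unless(v, 1, 0). */
static inline bool model_inc_not_zero(atomic_int *counter)
{
	return model_add_unless(counter, 1, 0);
}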
422 * @v: pointer of type atomic64_t
424 * Atomically increments @v by 1, so long as @v is non-zero.
425 * Returns non-zero if @v was non-zero, and zero otherwise.
427 static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
433 "1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
436 addic %1,%0,1\n\
437 stdcx. %1,0,%2\n\
438 bne- 1b\n"
443 : "r" (&v->counter)
448 #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
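
The self-referential #define above is the usual idiom for telling the generic fallback layer that this architecture provides the operation itself. A typical consumer of inc_not_zero is the lookup side of reference counting: take a reference only if the count has not already dropped to zero. A usage-level sketch with C11 atomics (struct and function names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_llong refcount;
	/* payload ... */
};

/* Take a reference only if the object is still live (refcount > 0). */
static bool obj_try_get(struct obj *o)
{
	long long old = atomic_load_explicit(&o->refcount, memory_order_relaxed);

	do {
		if (old == 0)
			return false;	/* already on its way to being freed */
	} while (!atomic_compare_exchange_weak_explicit(&o->refcount, &old,
							old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}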