Lines Matching +full:counter +full:- +full:1
1 /* SPDX-License-Identifier: GPL-2.0 */
13 #include <asm/asm-const.h>
17 * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
30 /* -mprefixed can generate offsets beyond range, fall back hack */ in arch_atomic_read()
32 __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter)); in arch_atomic_read()
34 __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); in arch_atomic_read()
41 /* -mprefixed can generate offsets beyond range, fall back hack */ in arch_atomic_set()
43 __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); in arch_atomic_set()
45 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); in arch_atomic_set()
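The two lwz/stw forms above are compile-time alternatives; a sketch of the surrounding arch_atomic_read(), assuming the selector is IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED) (the if/else lines are not among the matched lines; arch_atomic_set() follows the same pattern with stw):

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))	/* assumed selector */
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}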
54 "1: lwarx %0,0,%3 # atomic_" #op "\n" \
57 " bne- 1b\n" \
58 : "=&r" (t), "+m" (v->counter) \
59 : "r"#sign (a), "r" (&v->counter) \
69 "1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
72 " bne- 1b\n" \
73 : "=&r" (t), "+m" (v->counter) \
74 : "r"#sign (a), "r" (&v->counter) \
86 "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
87 #asm_op "%I3" suffix " %1,%0,%3\n" \
88 " stwcx. %1,0,%4\n" \
89 " bne- 1b\n" \
90 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
91 : "r"#sign (a), "r" (&v->counter) \
130 * atomic_fetch_add_unless - add unless the number is a given value
144 "1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\ in arch_atomic_fetch_add_unless()
148 " stwcx. %0,0,%1 \n\ in arch_atomic_fetch_add_unless()
149 bne- 1b \n" in arch_atomic_fetch_add_unless()
154 : "r" (&v->counter), "rI" (a), "r" (u) in arch_atomic_fetch_add_unless()
163 * The function returns the old value of *v minus 1, even if
172 "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ in arch_atomic_dec_if_positive()
173 cmpwi %0,1\n\ in arch_atomic_dec_if_positive()
174 addi %0,%0,-1\n\ in arch_atomic_dec_if_positive()
175 blt- 2f\n" in arch_atomic_dec_if_positive()
176 " stwcx. %0,0,%1\n\ in arch_atomic_dec_if_positive()
177 bne- 1b" in arch_atomic_dec_if_positive()
181 : "r" (&v->counter) in arch_atomic_dec_if_positive()
196 /* -mprefixed can generate offsets beyond range, fall back hack */ in arch_atomic64_read()
198 __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); in arch_atomic64_read()
200 __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); in arch_atomic64_read()
207 /* -mprefixed can generate offsets beyond range, fall back hack */ in arch_atomic64_set()
209 __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); in arch_atomic64_set()
211 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); in arch_atomic64_set()
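The 64-bit read/set fragments mirror the 32-bit pair, using ld/std on an atomic64_t; the "b" constraint in the fallback form keeps r0 out of the base register, since a base of 0 encodes the literal value zero in these instructions. A minimal usage sketch of the wrapped interface:

static void atomic64_rw_sketch(atomic64_t *v)
{
	s64 val;

	atomic64_set(v, 42);		/* single std (or prefixed form) */
	val = atomic64_read(v);		/* single ld			 */
	(void)val;
}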
220 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
223 " bne- 1b\n" \
224 : "=&r" (t), "+m" (v->counter) \
225 : "r" (a), "r" (&v->counter) \
236 "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
239 " bne- 1b\n" \
240 : "=&r" (t), "+m" (v->counter) \
241 : "r" (a), "r" (&v->counter) \
254 "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
255 #asm_op " %1,%3,%0\n" \
256 " stdcx. %1,0,%4\n" \
257 " bne- 1b\n" \
258 : "=&r" (res), "=&r" (t), "+m" (v->counter) \
259 : "r" (a), "r" (&v->counter) \
302 "1: ldarx %0,0,%2 # atomic64_inc\n\ in ATOMIC64_OPS()
303 addic %0,%0,1\n\ in ATOMIC64_OPS()
305 bne- 1b" in ATOMIC64_OPS()
306 : "=&r" (t), "+m" (v->counter) in ATOMIC64_OPS()
307 : "r" (&v->counter) in ATOMIC64_OPS()
317 "1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n" in arch_atomic64_inc_return_relaxed()
318 " addic %0,%0,1\n" in arch_atomic64_inc_return_relaxed()
320 " bne- 1b" in arch_atomic64_inc_return_relaxed()
321 : "=&r" (t), "+m" (v->counter) in arch_atomic64_inc_return_relaxed()
322 : "r" (&v->counter) in arch_atomic64_inc_return_relaxed()
333 "1: ldarx %0,0,%2 # atomic64_dec\n\ in arch_atomic64_dec()
334 addic %0,%0,-1\n\ in arch_atomic64_dec()
336 bne- 1b" in arch_atomic64_dec()
337 : "=&r" (t), "+m" (v->counter) in arch_atomic64_dec()
338 : "r" (&v->counter) in arch_atomic64_dec()
348 "1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n" in arch_atomic64_dec_return_relaxed()
349 " addic %0,%0,-1\n" in arch_atomic64_dec_return_relaxed()
351 " bne- 1b" in arch_atomic64_dec_return_relaxed()
352 : "=&r" (t), "+m" (v->counter) in arch_atomic64_dec_return_relaxed()
353 : "r" (&v->counter) in arch_atomic64_dec_return_relaxed()
364 * The function returns the old value of *v minus 1.
372 "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ in arch_atomic64_dec_if_positive()
373 addic. %0,%0,-1\n\ in arch_atomic64_dec_if_positive()
374 blt- 2f\n\ in arch_atomic64_dec_if_positive()
375 stdcx. %0,0,%1\n\ in arch_atomic64_dec_if_positive()
376 bne- 1b" in arch_atomic64_dec_if_positive()
380 : "r" (&v->counter) in arch_atomic64_dec_if_positive()
388 * atomic64_fetch_add_unless - add unless the number is a given value
402 "1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\ in arch_atomic64_fetch_add_unless()
406 " stdcx. %0,0,%1 \n\ in arch_atomic64_fetch_add_unless()
407 bne- 1b \n" in arch_atomic64_fetch_add_unless()
412 : "r" (&v->counter), "r" (a), "r" (u) in arch_atomic64_fetch_add_unless()
420 * atomic64_inc_not_zero - increment unless the number is zero
423 * Atomically increments @v by 1, so long as @v is non-zero.
424 * Returns non-zero if @v was non-zero, and zero otherwise.
432 "1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\ in arch_atomic64_inc_not_zero()
434 beq- 2f\n\ in arch_atomic64_inc_not_zero()
435 addic %1,%0,1\n\ in arch_atomic64_inc_not_zero()
436 stdcx. %1,0,%2\n\ in arch_atomic64_inc_not_zero()
437 bne- 1b\n" in arch_atomic64_inc_not_zero()
442 : "r" (&v->counter) in arch_atomic64_inc_not_zero()