/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */
#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */
/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 *     being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 *     until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 *     operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}
#define ATOMIC_OPS(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int val;						\
	smp_mb();						\
	val = __insn_fetch##op##4((void *)&v->counter, i);	\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	__insn_fetch##op##4((void *)&v->counter, i);		\
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS
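/*
 * Illustration only (not part of the original header): what one
 * ATOMIC_OPS() instantiation expands to after token pasting, shown here
 * for "and".  This sketch is kept out of the build; the real definitions
 * come from the macro above.
 */
#if 0
static inline int atomic_fetch_and(int i, atomic_t *v)
{
	int val;
	smp_mb();
	val = __insn_fetchand4((void *)&v->counter, i);
	smp_mb();
	return val;
}
static inline void atomic_and(int i, atomic_t *v)
{
	__insn_fetchand4((void *)&v->counter, i);
}
#endif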
static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}
static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}
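/*
 * Illustration only (not part of the original header): the
 * SPR_CMPEXCH_VALUE plus cmpexch4 pairing used in the xor routines above,
 * written as a bare 32-bit compare-and-swap.  The expected value is loaded
 * into the SPR and the cmpexch instruction returns the prior memory
 * contents.  The function name is hypothetical; the real cmpxchg() helpers
 * for this architecture live elsewhere.
 */
#if 0
static inline int example_cmpxchg4(int *p, int old, int new)
{
	__insn_mtspr(SPR_CMPEXCH_VALUE, old);	/* value we expect to find */
	return __insn_cmpexch4(p, new);		/* returns prior *p */
}
#endif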
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}
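/*
 * Illustration only (not part of the original header): a sketch of how
 * __atomic_add_unless() backs a "take a reference only if still live"
 * pattern, as the generic atomic_inc_not_zero() does.  The struct and
 * function names are hypothetical.
 */
#if 0
struct example_obj {
	atomic_t refcount;
};

static inline int example_obj_tryget(struct example_obj *obj)
{
	/* Add 1 unless the count is already 0; nonzero means we got a ref. */
	return __atomic_add_unless(&obj->refcount, 1, 0) != 0;
}
#endif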
/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}
#define ATOMIC64_OPS(op)					\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)	\
{								\
	long val;						\
	smp_mb();						\
	val = __insn_fetch##op((void *)&v->counter, i);		\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic64_##op(long i, atomic64_t *v)	\
{								\
	__insn_fetch##op((void *)&v->counter, i);		\
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}
static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
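/*
 * Illustration only (not part of the original header): a sketch of the
 * get/put pattern these 64-bit helpers support, taking a reference only
 * while the counter is nonzero and detecting the final drop.  The struct
 * and function names are hypothetical.
 */
#if 0
struct example_obj64 {
	atomic64_t refcount;
};

static inline long example_obj64_tryget(struct example_obj64 *obj)
{
	/* Nonzero means the count was not 0 and we took a reference. */
	return atomic64_inc_not_zero(&obj->refcount);
}

static inline long example_obj64_put(struct example_obj64 *obj)
{
	/* Nonzero means this call released the last reference. */
	return atomic64_dec_and_test(&obj->refcount);
}
#endif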
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */