/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/spr-regs.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	add%I2		%1,%2,%1		\n"
	    "	cst.p		%1,%M0		,cc3,#1	\n"
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq		icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}
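
/*
 * In C terms, atomic_add_return() behaves roughly like the sketch below.
 * This is an illustration only: store_unless_interrupted() is a made-up
 * name for what the cst.p conditional store does; its failure on
 * interruption is arranged through the ICC3.Z/CC3 handshake with the
 * kernel's exception entry paths (see Documentation/frv/atomic-ops.txt).
 *
 *	do {
 *		val = v->counter;	// ld.p: start the watched sequence
 *		val += i;		// add
 *	} while (!store_unless_interrupted(&v->counter, val));
 *					// cst.p skips the store, and the
 *					// beq retries, if we were preempted
 */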

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long val;

	asm("0:						\n"
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
	    "	ckeq		icc3,cc7		\n"
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
	    "	sub%I2		%1,%2,%1		\n"
	    "	cst.p		%1,%M0		,cc3,#1	\n"
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
	    "	beq		icc3,#0,0b		\n"
	    : "+U"(v->counter), "=&r"(val)
	    : "NPr"(i)
	    : "memory", "cc7", "cc3", "icc3"
	    );

	return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
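
/*
 * Typical resource-counting usage (an illustrative sketch; nr_widgets and
 * free_all_widgets() are hypothetical, not part of this header):
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_widgets);		// take a reference
 *	if (atomic_dec_and_test(&nr_widgets))	// drop it; true on 1 -> 0
 *		free_all_widgets();
 */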

/*
 * 64-bit atomic ops
 */
typedef struct {
	volatile long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

static inline long long atomic64_read(atomic64_t *v)
{
	long long counter;

	asm("ldd%I1 %M1,%0"
	    : "=e"(counter)
	    : "m"(v->counter));
	return counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile("std%I0 %1,%M0"
		     : "=m"(v->counter)
		     : "e"(i));
}
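
/*
 * atomic64_read() and atomic64_set() lean on ldd/std moving the whole
 * doubleword in one access; the "e" constraint asks gcc for an
 * even-numbered general register (pairing with the following odd one to
 * hold the 64-bit value).
 */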

extern long long atomic64_inc_return(atomic64_t *v);
extern long long atomic64_dec_return(atomic64_t *v);
extern long long atomic64_add_return(long long i, atomic64_t *v);
extern long long atomic64_sub_return(long long i, atomic64_t *v);

static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	atomic64_sub_return(i, v);
}

static inline void atomic64_inc(atomic64_t *v)
{
	atomic64_inc_return(v);
}

static inline void atomic64_dec(atomic64_t *v)
{
	atomic64_dec_return(v);
}

#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)

/*****************************************************************************/
/*
 * exchange value with memory
 */
extern uint64_t __xchg_64(uint64_t i, volatile void *v);
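
/* 64-bit exchange is always out of line: the inline xchg() below only
 * implements the 4-byte case, which maps directly onto the swap
 * instruction.
 */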

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x)								\
({										\
	__typeof__(ptr) __xg_ptr = (ptr);					\
	__typeof__(*(ptr)) __xg_orig;						\
										\
	switch (sizeof(__xg_orig)) {						\
	case 4:									\
		asm volatile(							\
			"swap%I0 %M0,%1"					\
			: "+m"(*__xg_ptr), "=r"(__xg_orig)			\
			: "1"(x)						\
			: "memory"						\
			);							\
		break;								\
										\
	default:								\
		__xg_orig = (__typeof__(__xg_orig))0;				\
		asm volatile("break");						\
		break;								\
	}									\
										\
	__xg_orig;								\
})

#else

extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x)										\
({												\
	__typeof__(ptr) __xg_ptr = (ptr);							\
	__typeof__(*(ptr)) __xg_orig;								\
												\
	switch (sizeof(__xg_orig)) {								\
	case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr);	break;	\
	default:										\
		__xg_orig = (__typeof__(__xg_orig))0;						\
		asm volatile("break");								\
		break;										\
	}											\
	__xg_orig;										\
})

#endif

#define tas(ptr) (xchg((ptr), 1))
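
/*
 * Example use of xchg() (an illustrative sketch; "pending" and
 * process_pending() are hypothetical):
 *
 *	static int pending;
 *
 *	if (xchg(&pending, 0))		// fetch and clear in one atomic step
 *		process_pending();
 */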

#define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
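
/*
 * __atomic_add_unless() adds @a to @v unless @v currently holds @u, and
 * returns the value it found.  The classic use is taking a reference only
 * while the object is still live (an illustrative sketch; "obj" is
 * hypothetical):
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;	// refcount already hit zero
 */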

#endif /* _ASM_ATOMIC_H */