#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/page.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants: one disables
 * IRQs on the local processor, the other does not.
 *
 * We make no fairness guarantees; fairness would have a cost.
 */

#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))
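
/*
 * Illustrative sketch of the intended use (not kernel code; callers
 * normally reach these primitives through the generic spin_lock()/
 * spin_unlock() wrappers in <linux/spinlock.h>, and the initializer
 * below assumes the usual __ARCH_SPIN_LOCK_UNLOCKED from
 * asm/spinlock_types.h):
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&l);		// spins until the lock is owned
 *	// ... critical section ...
 *	arch_spin_unlock(&l);		// mb(), then slock = 1
 */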

/**
 * arch_spin_trylock - try to acquire the spin lock without spinning
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to acquire the lock and returns the result.
 * On the m32r, the result is 1 on success or 0 on failure.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
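	/*
	 * Descriptive note (added): mvfc/mvtc save and restore the
	 * caller's PSW, and clrpsw #0x40 clears the interrupt-enable
	 * bit so the sequence cannot be interrupted locally; the m32r
	 * lock/unlock instructions perform the locked load and the
	 * unlocking store that make the read-modify-write of
	 * lock->slock atomic.
	 */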
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
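
/*
 * Example (an illustrative sketch only; real code uses the generic
 * spin_trylock() wrapper rather than calling this directly):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		// ... critical section ...
 *		arch_spin_unlock(&lock);
 *	} else {
 *		// contended: retry, back off, or take a slow path
 *	}
 */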

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
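	/*
	 * Descriptive note (added): the contended path at label 2 is
	 * placed out of line via LOCK_SECTION_START/LOCK_SECTION_END so
	 * the uncontended fast path stays compact.  While contended, it
	 * spins on a plain load of lock->slock and jumps back to retry
	 * the atomic decrement only once the value turns positive.
	 */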
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
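	/*
	 * Descriptive note (added): a single positive store releases the
	 * lock; the mb() keeps all critical-section accesses ordered
	 * before the releasing store becomes visible to other CPUs.
	 */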
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
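
/*
 * Sketch of the counting scheme (added for clarity; RW_LOCK_BIAS and
 * its string form RW_LOCK_BIAS_STR come from asm/spinlock_types.h,
 * conventionally 0x01000000):
 *
 *	lock == RW_LOCK_BIAS		: free
 *	lock == RW_LOCK_BIAS - N	: held by N readers
 *	lock == 0			: held by one writer
 *
 * Each reader subtracts 1 and a writer subtracts the full bias, so a
 * writer can only succeed when neither readers nor another writer are
 * present.
 */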

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
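	/*
	 * Descriptive note (added): seth/or3 materialize the 32-bit
	 * RW_LOCK_BIAS constant in a register (its string form is
	 * pasted into the asm text), and on CONFIG_CHIP_M32700_TS1 the
	 * DCACHE_CLEAR workaround uses r7 as scratch, hence the
	 * conditional clobber below.
	 */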
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
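
/*
 * Example use of the trylock pair (an illustrative sketch; real code
 * goes through the generic read_trylock()/write_trylock() wrappers):
 *
 *	if (arch_write_trylock(&rw)) {
 *		// exclusive access
 *		arch_write_unlock(&rw);
 *	}
 *
 * On failure both trylocks undo their speculative counter update
 * (atomic_inc()/atomic_add()) before returning 0, so the lock word is
 * left unchanged.
 */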

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */