#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_PPC64
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
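/*
 * How the count is used (a sketch; the wait-list handling itself lives in
 * the generic code in lib/rwsem.c, not in this header):
 *
 *   count == 0				lock is free
 *   count == N (low bits)		N readers active, nobody queued
 *   count == RWSEM_ACTIVE_WRITE_BIAS	one writer active, nobody queued
 *   count negative, low bits zero	no active lockers, but tasks are
 *					queued on the wait list
 *
 * Readers add RWSEM_ACTIVE_READ_BIAS, writers add RWSEM_ACTIVE_WRITE_BIAS,
 * so the RWSEM_ACTIVE_MASK bits count the active lockers while the sign of
 * the count says whether a writer and/or waiters are involved.
 */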

/*
 * lock for reading
 */
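/*
 * Fast path: atomically bump the active-reader count.  A positive result
 * means only readers hold the lock and the caller may proceed; a result
 * <= 0 means a writer is active or tasks are queued (the count has been
 * biased negative), so fall back to rwsem_down_read_failed() in
 * lib/rwsem.c and sleep.
 */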
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

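/*
 * Try to lock for reading without sleeping: loop with cmpxchg() as long as
 * the count is non-negative (no writer active or waiting), adding the read
 * bias.  Returns 1 on success, 0 if a writer got in the way.
 */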
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
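/*
 * Fast path for write locking: add the full write bias in one atomic op.
 * If the semaphore was free, the result is exactly RWSEM_ACTIVE_WRITE_BIAS;
 * any other value means readers or another writer are active or queued, so
 * take the slow path in lib/rwsem.c.  The subclass argument exists for the
 * nested-locking (lockdep) API and is not used by this fast path.
 */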
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

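/*
 * Try to lock for writing without sleeping: a single cmpxchg() from the
 * unlocked value to the write bias.  Any reader or writer activity at all
 * makes the compare fail and the trylock returns 0.
 */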
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
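/*
 * Drop the read bias.  Waiters only need waking when tasks are queued (the
 * count is still negative, i.e. RWSEM_WAITING_BIAS is in it) and no active
 * lockers remain (the RWSEM_ACTIVE_MASK bits are now zero).
 */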
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
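/*
 * Drop the write bias.  If the result is negative, tasks queued up while
 * the writer held the lock, so hand it over via rwsem_wake().
 */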
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
				 (atomic_long_t *)&sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
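/*
 * rwsem_atomic_add() and rwsem_atomic_update() below are the hooks the
 * generic slow paths in lib/rwsem.c use to adjust the count (e.g. while
 * granting the lock to queued waiters); they are not part of the
 * user-visible locking API.
 */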
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
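/*
 * Subtracting RWSEM_WAITING_BIAS turns the writer's bias into a single
 * read bias (RWSEM_ACTIVE_WRITE_BIAS - RWSEM_WAITING_BIAS ==
 * RWSEM_ACTIVE_READ_BIAS).  A negative result means tasks are still
 * queued, so wake any waiting readers via rwsem_downgrade_wake().
 */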
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
				     (atomic_long_t *)&sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_RWSEM_H */