/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

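/*
 * Return this CPU's spinlock lock value (CPU number + 1, see
 * arch_spin_lockval() below).  The value is kept in the lowcore so that it
 * can be loaded with a single instruction; the alternative picks up the
 * relocated lowcore address on machines where the lowcore has been moved
 * away from address zero.
 */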
static __always_inline unsigned int spinlock_lockval(void)
{
	unsigned long lc_lockval;
	unsigned int lockval;

	BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
	lc_lockval = offsetof(struct lowcore, spinlock_lockval);
	asm_inline(
		ALTERNATIVE("   ly      %[lockval],%[offzero](%%r0)\n",
			    "   ly      %[lockval],%[offalt](%%r0)\n",
			    ALT_FEATURE(MFEATURE_LOWCORE))
		: [lockval] "=d" (lockval)
		: [offzero] "i" (lc_lockval),
		  [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
		  "m" (((struct lowcore *)0)->spinlock_lockval));
	return lockval;
}

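/* Retry count for the lock slow paths, tunable via the spin_retry sysctl. */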
extern int spin_retry;

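/*
 * Report whether a (virtual) CPU is currently preempted by the hypervisor,
 * so that lock waiters can avoid spinning on a lock whose holder cannot
 * make progress anyway.
 */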
bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions - fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

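/*
 * A lock word of 0 means unlocked; the lock is taken by storing the
 * owner's lock value (CPU number + 1, see arch_spin_lockval() below) into
 * it.  These helpers are the arch backend for the generic spin_lock()
 * family and are not meant to be called directly.
 */
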
void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax	arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

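/*
 * Make a single attempt to take the lock: compare-and-swap the lock word
 * from 0 (unlocked) to this CPU's lock value.  The out-of-line slow paths
 * are entered only if this fails.
 */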
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	int old = 0;

	barrier();
	return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

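/*
 * Release the lock by storing 0 into the halfword that holds the owner's
 * lock value; the other halfword is used by the out-of-line wait code and
 * is left untouched.  The NIAI (next instruction access intent) hint is
 * patched in when facility 49 is installed.
 */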
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	kcsan_release();
	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
		"	mvhhi	%[lock],0\n"
		: [lock] "=Q" (((unsigned short *)&lp->lock)[1])
		:
		: "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

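/*
 * ->cnts encoding: the lower 16 bits count the active readers, a writer
 * owns the lock when 0x30000 is set in the upper half.  Readers must wait
 * whenever any upper-half bit is set (writer active or, via the
 * out-of-line wait code, waiting).
 *
 * Rough illustration of the mixing described above, using the generic
 * rwlock API on some rwlock_t rw (not part of this header): the writer
 * blocks interrupts, the readers do not have to, even when they also run
 * in interrupt context:
 *
 *	write_lock_irqsave(&rw, flags);
 *	...
 *	write_unlock_irqrestore(&rw, flags);
 *
 *	read_lock(&rw);
 *	...
 *	read_unlock(&rw);
 */
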
#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

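/*
 * Readers bump the reader count in the lower half of ->cnts; the slow path
 * has to be taken if any upper-half bit is set, i.e. a writer owns or is
 * waiting for the lock.
 */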
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

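/*
 * A writer gets the lock in one go only if nobody holds it at all, i.e.
 * ->cnts moves straight from 0 to 0x30000; otherwise the out-of-line wait
 * code takes over.
 */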
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old = 0;

	if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}

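/*
 * The trylock variants make exactly one attempt and never wait.
 */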
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */