/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

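/*
 * Save the floating point registers of a task. The four basic
 * registers (0, 2, 4 and 6) exist on all machines; the floating
 * point control register and the remaining twelve registers are
 * stored only if the machine has the IEEE floating point facility.
 */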
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

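/*
 * Restore the floating point registers of a task, mirroring
 * save_fp_regs: the four basic registers are always loaded, the
 * floating point control register and the remaining registers
 * only if the machine has the IEEE floating point facility.
 */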
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

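/*
 * Store respectively load access registers 0-15 to/from the array
 * at *acrs with a single stam/lam instruction.
 */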
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

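/*
 * Switch the user space register state lazily: kernel threads have
 * no mm and thus no user floating point or access register contents
 * that would need to be saved or restored.
 */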
#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_regs(&prev->thread.fp_regs);			\
		save_access_regs(&prev->thread.acrs[0]);		\
	}								\
	if (next->mm) {							\
		restore_fp_regs(&next->thread.fp_regs);			\
		restore_access_regs(&next->thread.acrs[0]);		\
		update_per_regs(next);					\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

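/*
 * With CONFIG_PFAULT disabled the pfault interface degenerates to
 * stubs: initialization reports failure (-1) and cleanup is a no-op.
 */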
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#define finish_arch_switch(prev) do {					     \
	set_fs(current->thread.mm_segment);				     \
	account_vtime(prev, current);					     \
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)   eieio()
#define mb()    eieio()
#define rmb()   eieio()
#define wmb()   eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb()       mb()
#define smp_rmb()      rmb()
#define smp_wmb()      wmb()
#define smp_read_barrier_depends()    read_barrier_depends()
#define smp_mb__before_clear_bit()     smp_mb()
#define smp_mb__after_clear_bit()      smp_mb()


#define set_mb(var, value)      do { var = value; mb(); } while (0)
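
/*
 * Usage sketch (producer side, names hypothetical): make the data
 * store visible to other CPUs before the flag store:
 *
 *	buf->data = val;
 *	smp_wmb();
 *	buf->ready = 1;
 *
 * The consumer pairs this with smp_rmb() between reading the flag
 * and reading the data.
 */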

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})
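
/*
 * Usage sketch (control register and bit number hypothetical):
 * flip a bit in a control register of the local CPU only:
 *
 *	__ctl_set_bit(0, 17);
 *
 * This stores the current value with stctg/stctl, sets the bit and
 * reloads the register with lctlg/lctl.
 */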

/*
 * Set the PSW mask, except for the first byte, which is left
 * unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
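
/*
 * Usage sketch: keep machine checks from being delivered on this
 * CPU around a critical sequence:
 *
 *	local_mcck_disable();
 *	...
 *	local_mcck_enable();
 */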

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}
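
/*
 * test_facility() usage sketch, with the facility bit chosen for
 * illustration only:
 *
 *	if (test_facility(34))
 *		...
 */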

static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

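/*
 * Test the protection of a storage location: returns the condition
 * code of the tprot instruction (0..3), or -EFAULT if tprot itself
 * raises an exception (the exception table fixup leaves rc at its
 * initial value).
 */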
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif /* __KERNEL__ */

#endif