1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 *    S390 low-level entry points.
4 *
5 *    Copyright IBM Corp. 1999, 2012
6 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 *		 Hartmut Penner (hp@de.ibm.com),
8 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 */
10
11#include <linux/export.h>
12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/asm-extable.h>
15#include <asm/alternative.h>
16#include <asm/processor.h>
17#include <asm/cache.h>
18#include <asm/dwarf.h>
19#include <asm/errno.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24#include <asm/page.h>
25#include <asm/sigp.h>
26#include <asm/irq.h>
27#include <asm/fpu-insn.h>
28#include <asm/setup.h>
29#include <asm/nmi.h>
30#include <asm/nospec-insn.h>
31#include <asm/lowcore.h>
32#include <asm/machine.h>
33
34_LPP_OFFSET	= __LC_LPP
35
	/*
	 * STBEAR: store the breaking-event address into \address.
	 * Patched in only when facility 193 is installed, otherwise a nop.
	 */
	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm
39
	/*
	 * LBEAR: load the breaking-event-address register from \address.
	 * Patched in only when facility 193 is installed, otherwise a nop.
	 */
	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm
43
	/*
	 * LPSWEY: load the return PSW from \address and leave the kernel.
	 * Without facility 193 this branches to the fallback code at \lpswe
	 * (which uses plain lpswe); with facility 193 the lpswey instruction
	 * is used directly. If the lowcore has been relocated (alternative
	 * lowcore), the absolute lowcore base is added to \address.
	 */
	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193),		\
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0),	\
		ALT_FEATURE(MFEATURE_LOWCORE)
	.endm
50
	/*
	 * MBEAR: copy the last-breaking-event address from the lowcore into
	 * the pt_regs area addressed by \reg (__PT_LAST_BREAK). A nop-sized
	 * brcl when facility 193 is not installed.
	 */
	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
		ALT_FACILITY(193)
	.endm
55
	/*
	 * CHECK_VMAP_STACK: verify that the current stack pointer (%r15)
	 * lies within one of the known kernel stacks. On success branch to
	 * \oklabel, otherwise load the address of \savearea into %r14 and
	 * jump to stack_invalid. Clobbers %r14.
	 */
	.macro	CHECK_VMAP_STACK savearea, lowcore, oklabel
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE	# round down to stack start
	oill	%r14,STACK_INIT_OFFSET		# expected initial stack pointer
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)	# pass save area to stack_invalid
	j	stack_invalid
	.endm
73
74	/*
75	 * The TSTMSK macro generates a test-under-mask instruction by
76	 * calculating the memory offset for the specified mask value.
77	 * Mask value can be any constant.  The macro shifts the mask
78	 * value to calculate the memory offset for the test-under-mask
79	 * instruction.
80	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
		.if (\bytepos < \size) && (\mask >> 8)
			.if (\mask & 0xff)
				.error "Mask exceeds byte boundary"
			.endif
			# mask bits live in a higher byte: recurse one byte up
			TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
			.exitm
		.endif
		.ifeq \mask
			.error "Mask must not be zero"
		.endif
		off = \size - \bytepos - 1	# offset of the byte holding the mask
		tm	off+\addr, \mask
	.endm
95
	/*
	 * BPOFF: disable branch prediction (PPA, function code 12) when the
	 * spectre mitigation alternative ALT_SPEC(82) is enabled; nop otherwise.
	 */
	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm
99
	/*
	 * BPON: re-enable branch prediction (PPA, function code 13) when the
	 * spectre mitigation alternative ALT_SPEC(82) is enabled; nop otherwise.
	 */
	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm
103
	/*
	 * BPENTER: on kernel entry, re-enable branch prediction (PPA fc 13)
	 * unless \tif_mask is set in \tif_ptr (guest BP isolation requested).
	 * With ALT_SPEC(82) enabled the whole sequence is replaced by a jump
	 * over it, i.e. patched out.
	 */
	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm
108
	/*
	 * BPEXIT: on exit to the guest, adjust branch prediction based on
	 * \tif_mask in \tif_ptr: by default disable BP (PPA fc 12) if the
	 * isolation bit is set; with ALT_SPEC(82) enabled, instead enable
	 * BP (PPA fc 13) if the bit is clear.
	 */
	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8;  .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm
114
115#if IS_ENABLED(CONFIG_KVM)
/*
	 * SIEEXIT: bring the CPU out of SIE state after an exception or
	 * interrupt hit while running a guest: clear the SIE bit in the
	 * control block, restore the kernel asce, clear the __TI_sie flag,
	 * and replace %r9 (the saved old-PSW address) with sie_exit so the
	 * eventual return from the handler lands in sie_exit.
	 */
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control			# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe		# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
124#endif
125
	/*
	 * STACKLEAK_ERASE: scrub the used portion of the kernel stack before
	 * returning to user space; empty unless the stackleak plugin is on.
	 */
	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm
131
132	GEN_BR_THUNK %r14
133
134	.section .kprobes.text, "ax"
135.Ldummy:
136	/*
137	 * The following nop exists only in order to avoid that the next
138	 * symbol starts at the beginning of the kprobes text section.
139	 * In that case there would be several symbols at the same address.
140	 * E.g. objdump would take an arbitrary symbol when disassembling
141	 * the code.
142	 * With the added nop in between this cannot happen.
143	 */
144	nop	0
145
146/*
147 * Scheduler resume function, called by __switch_to
148 *  gpr2 = (task_struct *)prev
149 *  gpr3 = (task_struct *)next
150 * Returns:
151 *  gpr2 = prev
152 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET		# offset of initial frame in stack
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	GET_LC	%r13
	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
	# reload the LPP value from the lowcore if facility 40 is installed
	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)
171
172#if IS_ENABLED(CONFIG_KVM)
173/*
174 * __sie64a calling convention:
175 * %r2 pointer to sie control block phys
176 * %r3 pointer to sie control block virt
177 * %r4 guest register save area
178 * %r5 guest asce
179 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1		# flag task as running in SIE context
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15) # load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
# The machine check handler compares the saved PSW address against the
# [.Lsie_entry, .Lsie_leave) range to detect mchecks that hit in SIE.
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be NOP to avoid triggering a machine check
# and handling it in a guest as result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0		# clear the in-SIE flag again
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
SYM_FUNC_END(__sie64a)
226EXPORT_SYMBOL(__sie64a)
227EXPORT_SYMBOL(sie_exit)
228#endif
229
230/*
231 * SVC interrupt handler routine. System calls are synchronous events and
232 * are entered with interrupts disabled.
233 */
234
SYM_CODE_START(system_call)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lghi	%r14,0			# %r14 = 0: regular svc entry (set to 1
					# by .Lpgm_svcper for single-stepped svc)
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK(%r13)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)	# switch to kernel stack
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
	MBEAR	%r2,%r13			# last breaking event addr -> pt_regs
	lgr	%r3,%r14			# second argument: PER flag from above
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)	# restore user asce
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)
272
273#
274# a new process exits the kernel with ret_from_fork
275#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11		# pass %r11 as second argument
					# (presumably pt_regs prepared by the
					# fork/copy_thread path - confirm there)
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	GET_LC	%r13
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)	# restore user asce
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)
289
290/*
291 * Program check handler routine
292 */
293
SYM_CODE_START(pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
	xgr	%r10,%r10		# %r10: pt_regs flags, may become
					# _PIF_GUEST_FAULT below
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	lg	%r11,__LC_CURRENT(%r13)
	tm	__TI_sie(%r11),0xff	# program check while running in SIE?
	jz	1f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)	# kernel fault: stay on current stack
	# CHECK_VMAP_STACK branches to stack_invalid or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3:	lg	%r15,__LC_KERNEL_STACK(%r13)	# user fault: switch to kernel stack
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r12,%r12
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call: redirect the return PSW to .Lsysc_per with
# %r14 = 1 so system_call knows this entry came from a PER event
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8(%r13)
	lghi	%r14,1			# PER flag, read in system_call
	LBEAR	__LC_PGM_LAST_BREAK(%r13)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)
363
364/*
365 * Interrupt handler macro used for external and IO interrupts.
366 */
.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stckf	__LC_INT_CLOCK(%r13)
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	STBEAR	__LC_LAST_BREAK(%r13)
	BPOFF
	lmg	%r8,%r9,\lc_old_psw(%r13)
	tmhh	%r8,0x0001			# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff		# interrupt hit while in SIE?
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)	# kernel: stay on current stack
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)	# user: switch asce and stack
	lg	%r15,__LC_KERNEL_STACK(%r13)
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# %r11: pt_regs
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	MBEAR	%r11,%r13			# last breaking event addr -> pt_regs
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
.endm
419
420	.section .irqentry.text, "ax"
421
422INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
423INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
424
425	.section .kprobes.text, "ax"
426
427/*
428 * Machine check handler routines
429 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	GET_LC	%r13
	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
	jo	3f			# saved CPU timer is valid -> use it
	# The CPU timer save area is invalid: pick the newest of the
	# sys-enter / exit / last-update timers as a replacement value
	# and reload the CPU timer with it.
	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
	jl	1f
	la	%r14,__LC_EXIT_TIMER(%r13)
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
2:	spt	0(%r14)			# reload CPU timer
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	.Lmcck_user
	# Need to compare the address instead of __TI_SIE flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14, 4f		# before SIE instruction -> not in SIE
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14, 4f		# at/after .Lsie_leave -> not in SIE
	lg	%r10,__LC_PCPU(%r13)
	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST	# mcck targeted at guest
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
	# Also reached for machine checks in kernel context (non-SIE);
	# handling always runs on the dedicated machine check stack.
	lg	%r15,__LC_MCCK_STACK(%r13)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)	# save interrupted cr1 for restore below
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
	mvc	__PT_R0(128,%r11),0(%r14)	# gprs from machine check save area
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)	# restore interrupted asce
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
0:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
		ALT_FACILITY(193)
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4			# skip our own CPU address
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)
540
/*
 * Restart interrupt handler: switch DAT on, move to the restart stack,
 * then call the function whose address, parameter and source cpu were
 * placed into the lowcore (__LC_RESTART_FN/_DATA/_SOURCE). If a source
 * cpu address is given (>= 0), wait for that cpu to be stopped first.
 * Finally stop this cpu.
 */
SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)	# move to the restart stack
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)
572
573	__INIT
/*
 * Minimal program check handler used during early boot (lives in the
 * __INIT section): build a pt_regs on the current stack, call
 * __do_early_pgm_check and return via the PSW it leaves in pt_regs.
 */
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC %r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)	# make room for pt_regs
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
589	__FINIT
590
591	.section .kprobes.text, "ax"
592
593/*
594 * The synchronous or the asynchronous stack pointer is invalid. We are dead.
595 * No need to properly save the registers, we are going to panic anyway.
596 * Setup a pt_regs so that show_trace can provide a good call trace.
597 */
SYM_CODE_START(stack_invalid)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15) # change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)	# %r14: save area, set by CHECK_VMAP_STACK
	GET_LC	%r2
	mvc	__PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_invalid	# branch (not call) to C panic handler
SYM_CODE_END(stack_invalid)
611
.section .data, "aw"
	.balign	4
# stop_lock/this_cpu: CPU-stopper lock and CPU address used by .Lmcck_panic
SYM_DATA_LOCAL(stop_lock,	.long 0)
SYM_DATA_LOCAL(this_cpu,	.short 0)
	.balign	8
# PSW loaded by restart_int_handler to switch DAT on and continue at .Ldaton
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
# 64-bit system call table: one __s390x_* entry per line of syscall_table.h
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include <asm/syscall_table.h>
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

# 31-bit compat system call table: __s390_* entries from the same list
#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include <asm/syscall_table.h>
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif
638