1/*
2 *
3 *  Copyright (C) 1991, 1992  Linus Torvalds
4 */
5
6/*
7 * entry.S contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all interrupts
9 * and faults that can result in a task-switch.
10 *
11 * NOTE: This code handles signal-recognition, which happens every time
12 * after a timer-interrupt and after each system call.
13 *
14 * I changed all the .align's to 4 (16 byte alignment), as that's faster
15 * on a 486.
16 *
17 * Stack layout in 'syscall_exit':
18 * 	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
20 *	updated in fork.c:copy_process, signal.c:do_signal,
21 *	ptrace.c and ptrace.h
22 *
23 *	 0(%esp) - %ebx
24 *	 4(%esp) - %ecx
25 *	 8(%esp) - %edx
26 *       C(%esp) - %esi
27 *	10(%esp) - %edi
28 *	14(%esp) - %ebp
29 *	18(%esp) - %eax
30 *	1C(%esp) - %ds
31 *	20(%esp) - %es
32 *	24(%esp) - %fs
33 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
34 *	2C(%esp) - orig_eax
35 *	30(%esp) - %eip
36 *	34(%esp) - %cs
37 *	38(%esp) - %eflags
38 *	3C(%esp) - %oldesp
39 *	40(%esp) - %oldss
40 *
41 * "current" is in register %ebx during any slow entries.
42 */
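/*
 * For illustration, the PT_* offsets used throughout this file (generated
 * from struct pt_regs by asm-offsets.c) mirror the table above, e.g.
 * PT_EBX == 0x0 and PT_ORIG_EAX == 0x2C, so saved registers are reached as
 *
 *	movl PT_EAX(%esp), %eax		# saved user %eax
 *	movl PT_EIP(%esp), %edx		# saved user %eip
 */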
43
44#include <linux/linkage.h>
45#include <linux/err.h>
46#include <asm/thread_info.h>
47#include <asm/irqflags.h>
48#include <asm/errno.h>
49#include <asm/segment.h>
50#include <asm/smp.h>
51#include <asm/page_types.h>
52#include <asm/percpu.h>
53#include <asm/dwarf2.h>
54#include <asm/processor-flags.h>
55#include <asm/ftrace.h>
56#include <asm/irq_vectors.h>
57#include <asm/cpufeature.h>
58#include <asm/alternative-asm.h>
59
60/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
61#include <linux/elf-em.h>
62#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
63#define __AUDIT_ARCH_LE	   0x40000000
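/* For reference: EM_386 is 3 and __AUDIT_ARCH_LE flags little-endian, so
   AUDIT_ARCH_I386 evaluates to 0x40000003. */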
64
65#ifndef CONFIG_AUDITSYSCALL
66#define sysenter_audit	syscall_trace_entry
67#define sysexit_audit	syscall_exit_work
68#endif
69
70	.section .entry.text, "ax"
71
72/*
73 * We use macros for low-level operations which need to be overridden
74 * for paravirtualization.  The following will never clobber any registers:
75 *   INTERRUPT_RETURN (aka. "iret")
76 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
77 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
78 *
79 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
80 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
81 * Allowing a register to be clobbered can shrink the paravirt replacement
82 * enough to patch inline, increasing performance.
83 */
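/*
 * For instance, on native hardware a DISABLE_INTERRUPTS(CLBR_ANY) site can
 * be patched down to a single "cli", while a hypervisor may instead patch in
 * a call to its own irq-disable code - hence the clobber annotation.
 */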
84
85#ifdef CONFIG_PREEMPT
86#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
87#else
88#define preempt_stop(clobbers)
89#define resume_kernel		restore_all
90#endif
91
92.macro TRACE_IRQS_IRET
93#ifdef CONFIG_TRACE_IRQFLAGS
94	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?
95	jz 1f
96	TRACE_IRQS_ON
971:
98#endif
99.endm
100
101#ifdef CONFIG_VM86
102#define resume_userspace_sig	check_userspace
103#else
104#define resume_userspace_sig	resume_userspace
105#endif
106
107/*
108 * User gs save/restore
109 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which gcc requires to be at %gs:20.  Read the comment
 * at the top of stackprotector.h for more info.
113 *
114 * Local labels 98 and 99 are used.
115 */
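/*
 * Rough illustration of why this matters: with stack protector enabled gcc
 * emits canary accesses of (approximately) the form
 *
 *	movl %gs:20, %eax		# read the canary in the prologue
 *	...
 *	xorl %gs:20, %eax		# recheck it in the epilogue
 *
 * (branching to __stack_chk_fail on mismatch), so in the
 * !CONFIG_X86_32_LAZY_GS case a usable %gs must be kept loaded at all
 * times while in the kernel.
 */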
116#ifdef CONFIG_X86_32_LAZY_GS
117
 /* unfortunately push/pop can't be no-ops */
119.macro PUSH_GS
120	pushl_cfi $0
121.endm
122.macro POP_GS pop=0
123	addl $(4 + \pop), %esp
124	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
125.endm
126.macro POP_GS_EX
127.endm
128
 /* all the rest are no-ops */
130.macro PTGS_TO_GS
131.endm
132.macro PTGS_TO_GS_EX
133.endm
134.macro GS_TO_REG reg
135.endm
136.macro REG_TO_PTGS reg
137.endm
138.macro SET_KERNEL_GS reg
139.endm
140
141#else	/* CONFIG_X86_32_LAZY_GS */
142
143.macro PUSH_GS
144	pushl_cfi %gs
145	/*CFI_REL_OFFSET gs, 0*/
146.endm
147
148.macro POP_GS pop=0
14998:	popl_cfi %gs
150	/*CFI_RESTORE gs*/
151  .if \pop <> 0
152	add $\pop, %esp
153	CFI_ADJUST_CFA_OFFSET -\pop
154  .endif
155.endm
156.macro POP_GS_EX
157.pushsection .fixup, "ax"
15899:	movl $0, (%esp)
159	jmp 98b
160.section __ex_table, "a"
161	.align 4
162	.long 98b, 99b
163.popsection
164.endm
165
166.macro PTGS_TO_GS
16798:	mov PT_GS(%esp), %gs
168.endm
169.macro PTGS_TO_GS_EX
170.pushsection .fixup, "ax"
17199:	movl $0, PT_GS(%esp)
172	jmp 98b
173.section __ex_table, "a"
174	.align 4
175	.long 98b, 99b
176.popsection
177.endm
178
179.macro GS_TO_REG reg
180	movl %gs, \reg
181	/*CFI_REGISTER gs, \reg*/
182.endm
183.macro REG_TO_PTGS reg
184	movl \reg, PT_GS(%esp)
185	/*CFI_REL_OFFSET gs, PT_GS*/
186.endm
187.macro SET_KERNEL_GS reg
188	movl $(__KERNEL_STACK_CANARY), \reg
189	movl \reg, %gs
190.endm
191
192#endif	/* CONFIG_X86_32_LAZY_GS */
193
194.macro SAVE_ALL
195	cld
196	PUSH_GS
197	pushl_cfi %fs
198	/*CFI_REL_OFFSET fs, 0;*/
199	pushl_cfi %es
200	/*CFI_REL_OFFSET es, 0;*/
201	pushl_cfi %ds
202	/*CFI_REL_OFFSET ds, 0;*/
203	pushl_cfi %eax
204	CFI_REL_OFFSET eax, 0
205	pushl_cfi %ebp
206	CFI_REL_OFFSET ebp, 0
207	pushl_cfi %edi
208	CFI_REL_OFFSET edi, 0
209	pushl_cfi %esi
210	CFI_REL_OFFSET esi, 0
211	pushl_cfi %edx
212	CFI_REL_OFFSET edx, 0
213	pushl_cfi %ecx
214	CFI_REL_OFFSET ecx, 0
215	pushl_cfi %ebx
216	CFI_REL_OFFSET ebx, 0
217	movl $(__USER_DS), %edx
218	movl %edx, %ds
219	movl %edx, %es
220	movl $(__KERNEL_PERCPU), %edx
221	movl %edx, %fs
222	SET_KERNEL_GS %edx
223.endm
224
225.macro RESTORE_INT_REGS
226	popl_cfi %ebx
227	CFI_RESTORE ebx
228	popl_cfi %ecx
229	CFI_RESTORE ecx
230	popl_cfi %edx
231	CFI_RESTORE edx
232	popl_cfi %esi
233	CFI_RESTORE esi
234	popl_cfi %edi
235	CFI_RESTORE edi
236	popl_cfi %ebp
237	CFI_RESTORE ebp
238	popl_cfi %eax
239	CFI_RESTORE eax
240.endm
241
242.macro RESTORE_REGS pop=0
243	RESTORE_INT_REGS
2441:	popl_cfi %ds
245	/*CFI_RESTORE ds;*/
2462:	popl_cfi %es
247	/*CFI_RESTORE es;*/
2483:	popl_cfi %fs
249	/*CFI_RESTORE fs;*/
250	POP_GS \pop
251.pushsection .fixup, "ax"
2524:	movl $0, (%esp)
253	jmp 1b
2545:	movl $0, (%esp)
255	jmp 2b
2566:	movl $0, (%esp)
257	jmp 3b
258.section __ex_table, "a"
259	.align 4
260	.long 1b, 4b
261	.long 2b, 5b
262	.long 3b, 6b
263.popsection
264	POP_GS_EX
265.endm
266
267.macro RING0_INT_FRAME
268	CFI_STARTPROC simple
269	CFI_SIGNAL_FRAME
270	CFI_DEF_CFA esp, 3*4
271	/*CFI_OFFSET cs, -2*4;*/
272	CFI_OFFSET eip, -3*4
273.endm
274
275.macro RING0_EC_FRAME
276	CFI_STARTPROC simple
277	CFI_SIGNAL_FRAME
278	CFI_DEF_CFA esp, 4*4
279	/*CFI_OFFSET cs, -2*4;*/
280	CFI_OFFSET eip, -3*4
281.endm
282
283.macro RING0_PTREGS_FRAME
284	CFI_STARTPROC simple
285	CFI_SIGNAL_FRAME
286	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
287	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
288	CFI_OFFSET eip, PT_EIP-PT_OLDESP
289	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
290	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
291	CFI_OFFSET eax, PT_EAX-PT_OLDESP
292	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
293	CFI_OFFSET edi, PT_EDI-PT_OLDESP
294	CFI_OFFSET esi, PT_ESI-PT_OLDESP
295	CFI_OFFSET edx, PT_EDX-PT_OLDESP
296	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
297	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
298.endm
299
300ENTRY(ret_from_fork)
301	CFI_STARTPROC
302	pushl_cfi %eax
303	call schedule_tail
304	GET_THREAD_INFO(%ebp)
305	popl_cfi %eax
306	pushl_cfi $0x0202		# Reset kernel eflags
307	popfl_cfi
308	jmp syscall_exit
309	CFI_ENDPROC
310END(ret_from_fork)
311
312/*
313 * Interrupt exit functions should be protected against kprobes
314 */
315	.pushsection .kprobes.text, "ax"
316/*
317 * Return to user mode is not as complex as all this looks,
318 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
320 * less clear than it otherwise should be.
321 */
322
323	# userspace resumption stub bypassing syscall exit tracing
324	ALIGN
325	RING0_PTREGS_FRAME
326ret_from_exception:
327	preempt_stop(CLBR_ANY)
328ret_from_intr:
329	GET_THREAD_INFO(%ebp)
330check_userspace:
331	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
332	movb PT_CS(%esp), %al
333	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
334	cmpl $USER_RPL, %eax
335	jb resume_kernel		# not returning to v8086 or userspace
336
337ENTRY(resume_userspace)
338	LOCKDEP_SYS_EXIT
339 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
340					# setting need_resched or sigpending
341					# between sampling and the iret
342	TRACE_IRQS_OFF
343	movl TI_flags(%ebp), %ecx
344	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
345					# int/exception return?
346	jne work_pending
347	jmp restore_all
348END(ret_from_exception)
349
350#ifdef CONFIG_PREEMPT
351ENTRY(resume_kernel)
352	DISABLE_INTERRUPTS(CLBR_ANY)
353	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
354	jnz restore_all
355need_resched:
356	movl TI_flags(%ebp), %ecx	# need_resched set ?
357	testb $_TIF_NEED_RESCHED, %cl
358	jz restore_all
359	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
360	jz restore_all
361	call preempt_schedule_irq
362	jmp need_resched
363END(resume_kernel)
364#endif
365	CFI_ENDPROC
366/*
367 * End of kprobes section
368 */
369	.popsection
370
371/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
373
374	# sysenter call handler stub
375ENTRY(ia32_sysenter_target)
376	CFI_STARTPROC simple
377	CFI_SIGNAL_FRAME
378	CFI_DEF_CFA esp, 0
379	CFI_REGISTER esp, ebp
380	movl TSS_sysenter_sp0(%esp),%esp
381sysenter_past_esp:
382	/*
	 * Interrupts are disabled here, but we can't record that with
	 * TRACE_IRQS_OFF until enough kernel state has been set up to
	 * call it - and at that point we re-enable interrupts anyway.
386	 */
387	pushl_cfi $__USER_DS
388	/*CFI_REL_OFFSET ss, 0*/
389	pushl_cfi %ebp
390	CFI_REL_OFFSET esp, 0
391	pushfl_cfi
392	orl $X86_EFLAGS_IF, (%esp)
393	pushl_cfi $__USER_CS
394	/*CFI_REL_OFFSET cs, 0*/
395	/*
396	 * Push current_thread_info()->sysenter_return to the stack.
397	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
398	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
399	 */
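	/*
	 * Spelled out (a sketch, relying on the "+8" convention noted
	 * above): %esp here equals sp0 - 4*4, and sp0 sits 8 bytes below
	 * the top of the THREAD_SIZE-aligned stack whose bottom holds
	 * thread_info, so
	 *   %esp + (TI_sysenter_return - THREAD_SIZE + 8 + 4*4)
	 *	== thread_info + TI_sysenter_return.
	 */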
400	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
401	CFI_REL_OFFSET eip, 0
402
403	pushl_cfi %eax
404	SAVE_ALL
405	ENABLE_INTERRUPTS(CLBR_NONE)
406
407/*
408 * Load the potential sixth argument from user stack.
409 * Careful about security.
410 */
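/*
 * Why "-3": the 4-byte load at (%ebp) below touches %ebp..%ebp+3, so %ebp
 * may be at most __PAGE_OFFSET-4 for the access to stay entirely below the
 * kernel mapping; anything above that is rejected as a fault.
 */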
411	cmpl $__PAGE_OFFSET-3,%ebp
412	jae syscall_fault
4131:	movl (%ebp),%ebp
414	movl %ebp,PT_EBP(%esp)
415.section __ex_table,"a"
416	.align 4
417	.long 1b,syscall_fault
418.previous
419
420	GET_THREAD_INFO(%ebp)
421
422	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
423	jnz sysenter_audit
424sysenter_do_call:
425	cmpl $(NR_syscalls), %eax
426	jae syscall_badsys
427	call *sys_call_table(,%eax,4)
428	movl %eax,PT_EAX(%esp)
429	LOCKDEP_SYS_EXIT
430	DISABLE_INTERRUPTS(CLBR_ANY)
431	TRACE_IRQS_OFF
432	movl TI_flags(%ebp), %ecx
433	testl $_TIF_ALLWORK_MASK, %ecx
434	jne sysexit_audit
435sysenter_exit:
436/* if something modifies registers it must also disable sysexit */
437	movl PT_EIP(%esp), %edx
438	movl PT_OLDESP(%esp), %ecx
439	xorl %ebp,%ebp
440	TRACE_IRQS_ON
4411:	mov  PT_FS(%esp), %fs
442	PTGS_TO_GS
443	ENABLE_INTERRUPTS_SYSEXIT
444
445#ifdef CONFIG_AUDITSYSCALL
446sysenter_audit:
447	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
448	jnz syscall_trace_entry
449	addl $4,%esp
450	CFI_ADJUST_CFA_OFFSET -4
451	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
452	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
453	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
454	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
455	movl %eax,%edx			/* 2nd arg: syscall number */
456	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
457	call __audit_syscall_entry
458	pushl_cfi %ebx
459	movl PT_EAX(%esp),%eax		/* reload syscall number */
460	jmp sysenter_do_call
461
462sysexit_audit:
463	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
464	jne syscall_exit_work
465	TRACE_IRQS_ON
466	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax	/* is it outside the error range ? */
	setbe %al		/* al = 1 if not an error, 0 if it is */
	movzbl %al,%eax		/* zero-extend that (1st arg: success flag) */
471	call __audit_syscall_exit
472	DISABLE_INTERRUPTS(CLBR_ANY)
473	TRACE_IRQS_OFF
474	movl TI_flags(%ebp), %ecx
475	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
476	jne syscall_exit_work
477	movl PT_EAX(%esp),%eax	/* reload syscall return value */
478	jmp sysenter_exit
479#endif
480
481	CFI_ENDPROC
482.pushsection .fixup,"ax"
4832:	movl $0,PT_FS(%esp)
484	jmp 1b
485.section __ex_table,"a"
486	.align 4
487	.long 1b,2b
488.popsection
489	PTGS_TO_GS_EX
490ENDPROC(ia32_sysenter_target)
491
492/*
493 * syscall stub including irq exit should be protected against kprobes
494 */
495	.pushsection .kprobes.text, "ax"
496	# system call handler stub
497ENTRY(system_call)
498	RING0_INT_FRAME			# can't unwind into user space anyway
499	pushl_cfi %eax			# save orig_eax
500	SAVE_ALL
501	GET_THREAD_INFO(%ebp)
502					# system call tracing in operation / emulation
503	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
504	jnz syscall_trace_entry
505	cmpl $(NR_syscalls), %eax
506	jae syscall_badsys
507syscall_call:
508	call *sys_call_table(,%eax,4)
509	movl %eax,PT_EAX(%esp)		# store the return value
510syscall_exit:
511	LOCKDEP_SYS_EXIT
512	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
513					# setting need_resched or sigpending
514					# between sampling and the iret
515	TRACE_IRQS_OFF
516	movl TI_flags(%ebp), %ecx
517	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
518	jne syscall_exit_work
519
520restore_all:
521	TRACE_IRQS_IRET
522restore_all_notrace:
523	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
524	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
525	# are returning to the kernel.
526	# See comments in process.c:copy_thread() for details.
527	movb PT_OLDSS(%esp), %ah
528	movb PT_CS(%esp), %al
529	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
530	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
531	CFI_REMEMBER_STATE
532	je ldt_ss			# returning to user-space with LDT SS
533restore_nocheck:
534	RESTORE_REGS 4			# skip orig_eax/error_code
535irq_return:
536	INTERRUPT_RETURN
537.section .fixup,"ax"
538ENTRY(iret_exc)
539	pushl $0			# no error code
540	pushl $do_iret_error
541	jmp error_code
542.previous
543.section __ex_table,"a"
544	.align 4
545	.long irq_return,iret_exc
546.previous
547
548	CFI_RESTORE_STATE
549ldt_ss:
550	larl PT_OLDSS(%esp), %eax
551	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32-bit stack?
	jnz restore_nocheck		# all right, normal return
554
555#ifdef CONFIG_PARAVIRT
556	/*
557	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * of setting the high 16 bits in the INTERRUPT_RETURN
	 * paravirt-op is still available.
563	 */
564	cmpl $0, pv_info+PARAVIRT_enabled
565	jne restore_nocheck
566#endif
567
568/*
569 * Setup and switch to ESPFIX stack
570 *
571 * We're returning to userspace with a 16 bit stack. The CPU will not
572 * restore the high word of ESP for us on executing iret... This is an
573 * "official" bug of all the x86-compatible CPUs, which we can work
574 * around to make dosemu and wine happy. We do this by preloading the
575 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by switching to the ESPFIX segment with
 * a base address that makes up for the difference.
578 */
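/*
 * A worked example with made-up numbers: if the kernel %esp is 0xc1234a80
 * and the saved user %esp is 0x5678fff0, then %eax below becomes 0x56784a80
 * and the computed base is 0xc1234a80 - 0x56784a80 = 0x6aab0000;
 * 0x6aab0000 + 0x56784a80 gives back 0xc1234a80, i.e. the same linear
 * stack, while the architectural %esp now carries the user's high word for
 * iret to leave in place.
 */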
579#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
580	mov %esp, %edx			/* load kernel esp */
581	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
582	mov %dx, %ax			/* eax: new kernel esp */
583	sub %eax, %edx			/* offset (low word is 0) */
584	shr $16, %edx
585	mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
586	mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
587	pushl_cfi $__ESPFIX_SS
588	pushl_cfi %eax			/* new kernel esp */
589	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irq state that will hold after the iret */
592	DISABLE_INTERRUPTS(CLBR_EAX)
593	lss (%esp), %esp		/* switch to espfix segment */
594	CFI_ADJUST_CFA_OFFSET -8
595	jmp restore_nocheck
596	CFI_ENDPROC
597ENDPROC(system_call)
598
599	# perform work that needs to be done immediately before resumption
600	ALIGN
601	RING0_PTREGS_FRAME		# can't unwind into user space anyway
602work_pending:
603	testb $_TIF_NEED_RESCHED, %cl
604	jz work_notifysig
605work_resched:
606	call schedule
607	LOCKDEP_SYS_EXIT
608	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
609					# setting need_resched or sigpending
610					# between sampling and the iret
611	TRACE_IRQS_OFF
612	movl TI_flags(%ebp), %ecx
613	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
614					# than syscall tracing?
615	jz restore_all
616	testb $_TIF_NEED_RESCHED, %cl
617	jnz work_resched
618
619work_notifysig:				# deal with pending signals and
620					# notify-resume requests
621#ifdef CONFIG_VM86
622	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
623	movl %esp, %eax
624	jne work_notifysig_v86		# returning to kernel-space or
625					# vm86-space
626	TRACE_IRQS_ON
627	ENABLE_INTERRUPTS(CLBR_NONE)
628	xorl %edx, %edx
629	call do_notify_resume
630	jmp resume_userspace_sig
631
632	ALIGN
633work_notifysig_v86:
634	pushl_cfi %ecx			# save ti_flags for do_notify_resume
635	call save_v86_state		# %eax contains pt_regs pointer
636	popl_cfi %ecx
637	movl %eax, %esp
638#else
639	movl %esp, %eax
640#endif
641	TRACE_IRQS_ON
642	ENABLE_INTERRUPTS(CLBR_NONE)
643	xorl %edx, %edx
644	call do_notify_resume
645	jmp resume_userspace_sig
646END(work_pending)
647
	# perform syscall entry tracing
649	ALIGN
650syscall_trace_entry:
651	movl $-ENOSYS,PT_EAX(%esp)
652	movl %esp, %eax
653	call syscall_trace_enter
654	/* What it returned is what we'll actually use.  */
655	cmpl $(NR_syscalls), %eax
656	jnae syscall_call
657	jmp syscall_exit
658END(syscall_trace_entry)
659
660	# perform syscall exit tracing
661	ALIGN
662syscall_exit_work:
663	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
664	jz work_pending
665	TRACE_IRQS_ON
666	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
667					# schedule() instead
668	movl %esp, %eax
669	call syscall_trace_leave
670	jmp resume_userspace
671END(syscall_exit_work)
672	CFI_ENDPROC
673
674	RING0_INT_FRAME			# can't unwind into user space anyway
675syscall_fault:
676	GET_THREAD_INFO(%ebp)
677	movl $-EFAULT,PT_EAX(%esp)
678	jmp resume_userspace
679END(syscall_fault)
680
681syscall_badsys:
682	movl $-ENOSYS,PT_EAX(%esp)
683	jmp resume_userspace
684END(syscall_badsys)
685	CFI_ENDPROC
686/*
687 * End of kprobes section
688 */
689	.popsection
690
691/*
692 * System calls that need a pt_regs pointer.
693 */
694#define PTREGSCALL0(name) \
695ENTRY(ptregs_##name) ;  \
696	leal 4(%esp),%eax; \
697	jmp sys_##name; \
698ENDPROC(ptregs_##name)
699
700#define PTREGSCALL1(name) \
701ENTRY(ptregs_##name) ; \
702	leal 4(%esp),%edx; \
703	movl (PT_EBX+4)(%esp),%eax; \
704	jmp sys_##name; \
705ENDPROC(ptregs_##name)
706
707#define PTREGSCALL2(name) \
708ENTRY(ptregs_##name) ; \
709	leal 4(%esp),%ecx; \
710	movl (PT_ECX+4)(%esp),%edx; \
711	movl (PT_EBX+4)(%esp),%eax; \
712	jmp sys_##name; \
713ENDPROC(ptregs_##name)
714
715#define PTREGSCALL3(name) \
716ENTRY(ptregs_##name) ; \
717	CFI_STARTPROC; \
718	leal 4(%esp),%eax; \
719	pushl_cfi %eax; \
720	movl PT_EDX(%eax),%ecx; \
721	movl PT_ECX(%eax),%edx; \
722	movl PT_EBX(%eax),%eax; \
723	call sys_##name; \
724	addl $4,%esp; \
725	CFI_ADJUST_CFA_OFFSET -4; \
726	ret; \
727	CFI_ENDPROC; \
728ENDPROC(ptregs_##name)
729
730PTREGSCALL1(iopl)
731PTREGSCALL0(fork)
732PTREGSCALL0(vfork)
733PTREGSCALL3(execve)
734PTREGSCALL2(sigaltstack)
735PTREGSCALL0(sigreturn)
736PTREGSCALL0(rt_sigreturn)
737PTREGSCALL2(vm86)
738PTREGSCALL1(vm86old)
739
740/* Clone is an oddball.  The 4th arg is in %edi */
741ENTRY(ptregs_clone)
742	CFI_STARTPROC
743	leal 4(%esp),%eax
744	pushl_cfi %eax
745	pushl_cfi PT_EDI(%eax)
746	movl PT_EDX(%eax),%ecx
747	movl PT_ECX(%eax),%edx
748	movl PT_EBX(%eax),%eax
749	call sys_clone
750	addl $8,%esp
751	CFI_ADJUST_CFA_OFFSET -8
752	ret
753	CFI_ENDPROC
754ENDPROC(ptregs_clone)
755
756.macro FIXUP_ESPFIX_STACK
757/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
759 *
760 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
762 * normal stack and adjusts ESP with the matching offset.
763 */
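/*
 * Note that only base bits 16..31 were stored by the ldt_ss path (the low
 * word of the base is zero by construction), so rebuilding %eax from those
 * two GDT bytes and shifting left by 16 recovers the full segment base.
 */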
764	/* fixup the stack */
765	mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
766	mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
767	shl $16, %eax
768	addl %esp, %eax			/* the adjusted stack pointer */
769	pushl_cfi $__KERNEL_DS
770	pushl_cfi %eax
771	lss (%esp), %esp		/* switch to the normal stack segment */
772	CFI_ADJUST_CFA_OFFSET -8
773.endm
774.macro UNWIND_ESPFIX_STACK
775	movl %ss, %eax
776	/* see if on espfix stack */
777	cmpw $__ESPFIX_SS, %ax
778	jne 27f
779	movl $__KERNEL_DS, %eax
780	movl %eax, %ds
781	movl %eax, %es
782	/* switch to normal stack */
783	FIXUP_ESPFIX_STACK
78427:
785.endm
786
787/*
788 * Build the entry stubs and pointer table with some assembler magic.
789 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
790 * single cache line on all modern x86 implementations.
791 */
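/*
 * Size sketch (assuming the usual encodings): each stub is a 2-byte
 * "push $imm8" plus a 2-byte short jmp, the 7th stub omits the jmp, and the
 * chunk ends with one near "jmp common_interrupt" - roughly 6*4 + 2 + 5
 * bytes, which is how 7 stubs fit in 32 bytes.
 */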
792.section .init.rodata,"a"
793ENTRY(interrupt)
794.section .entry.text, "ax"
795	.p2align 5
796	.p2align CONFIG_X86_L1_CACHE_SHIFT
797ENTRY(irq_entries_start)
798	RING0_INT_FRAME
799vector=FIRST_EXTERNAL_VECTOR
800.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
801	.balign 32
802  .rept	7
803    .if vector < NR_VECTORS
804      .if vector <> FIRST_EXTERNAL_VECTOR
805	CFI_ADJUST_CFA_OFFSET -4
806      .endif
8071:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
808      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
809	jmp 2f
810      .endif
811      .previous
812	.long 1b
813      .section .entry.text, "ax"
814vector=vector+1
815    .endif
816  .endr
8172:	jmp common_interrupt
818.endr
819END(irq_entries_start)
820
821.previous
822END(interrupt)
823.previous
824
825/*
826 * the CPU automatically disables interrupts when executing an IRQ vector,
827 * so IRQ-flags tracing has to follow that:
828 */
829	.p2align CONFIG_X86_L1_CACHE_SHIFT
830common_interrupt:
831	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
832	SAVE_ALL
833	TRACE_IRQS_OFF
834	movl %esp,%eax
835	call do_IRQ
836	jmp ret_from_intr
837ENDPROC(common_interrupt)
838	CFI_ENDPROC
839
840/*
841 *  Irq entries should be protected against kprobes
842 */
843	.pushsection .kprobes.text, "ax"
844#define BUILD_INTERRUPT3(name, nr, fn)	\
845ENTRY(name)				\
846	RING0_INT_FRAME;		\
847	pushl_cfi $~(nr);		\
848	SAVE_ALL;			\
849	TRACE_IRQS_OFF			\
850	movl %esp,%eax;			\
851	call fn;			\
852	jmp ret_from_intr;		\
853	CFI_ENDPROC;			\
854ENDPROC(name)
855
856#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
857
858/* The include is where all of the SMP etc. interrupts come from */
859#include <asm/entry_arch.h>
860
861ENTRY(coprocessor_error)
862	RING0_INT_FRAME
863	pushl_cfi $0
864	pushl_cfi $do_coprocessor_error
865	jmp error_code
866	CFI_ENDPROC
867END(coprocessor_error)
868
869ENTRY(simd_coprocessor_error)
870	RING0_INT_FRAME
871	pushl_cfi $0
872#ifdef CONFIG_X86_INVD_BUG
873	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
874661:	pushl_cfi $do_general_protection
875662:
876.section .altinstructions,"a"
877	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
878.previous
879.section .altinstr_replacement,"ax"
880663:	pushl $do_simd_coprocessor_error
881664:
882.previous
883#else
884	pushl_cfi $do_simd_coprocessor_error
885#endif
886	jmp error_code
887	CFI_ENDPROC
888END(simd_coprocessor_error)
889
890ENTRY(device_not_available)
891	RING0_INT_FRAME
892	pushl_cfi $-1			# mark this as an int
893	pushl_cfi $do_device_not_available
894	jmp error_code
895	CFI_ENDPROC
896END(device_not_available)
897
898#ifdef CONFIG_PARAVIRT
899ENTRY(native_iret)
900	iret
901.section __ex_table,"a"
902	.align 4
903	.long native_iret, iret_exc
904.previous
905END(native_iret)
906
907ENTRY(native_irq_enable_sysexit)
908	sti
909	sysexit
910END(native_irq_enable_sysexit)
911#endif
912
913ENTRY(overflow)
914	RING0_INT_FRAME
915	pushl_cfi $0
916	pushl_cfi $do_overflow
917	jmp error_code
918	CFI_ENDPROC
919END(overflow)
920
921ENTRY(bounds)
922	RING0_INT_FRAME
923	pushl_cfi $0
924	pushl_cfi $do_bounds
925	jmp error_code
926	CFI_ENDPROC
927END(bounds)
928
929ENTRY(invalid_op)
930	RING0_INT_FRAME
931	pushl_cfi $0
932	pushl_cfi $do_invalid_op
933	jmp error_code
934	CFI_ENDPROC
935END(invalid_op)
936
937ENTRY(coprocessor_segment_overrun)
938	RING0_INT_FRAME
939	pushl_cfi $0
940	pushl_cfi $do_coprocessor_segment_overrun
941	jmp error_code
942	CFI_ENDPROC
943END(coprocessor_segment_overrun)
944
945ENTRY(invalid_TSS)
946	RING0_EC_FRAME
947	pushl_cfi $do_invalid_TSS
948	jmp error_code
949	CFI_ENDPROC
950END(invalid_TSS)
951
952ENTRY(segment_not_present)
953	RING0_EC_FRAME
954	pushl_cfi $do_segment_not_present
955	jmp error_code
956	CFI_ENDPROC
957END(segment_not_present)
958
959ENTRY(stack_segment)
960	RING0_EC_FRAME
961	pushl_cfi $do_stack_segment
962	jmp error_code
963	CFI_ENDPROC
964END(stack_segment)
965
966ENTRY(alignment_check)
967	RING0_EC_FRAME
968	pushl_cfi $do_alignment_check
969	jmp error_code
970	CFI_ENDPROC
971END(alignment_check)
972
973ENTRY(divide_error)
974	RING0_INT_FRAME
975	pushl_cfi $0			# no error code
976	pushl_cfi $do_divide_error
977	jmp error_code
978	CFI_ENDPROC
979END(divide_error)
980
981#ifdef CONFIG_X86_MCE
982ENTRY(machine_check)
983	RING0_INT_FRAME
984	pushl_cfi $0
985	pushl_cfi machine_check_vector
986	jmp error_code
987	CFI_ENDPROC
988END(machine_check)
989#endif
990
991ENTRY(spurious_interrupt_bug)
992	RING0_INT_FRAME
993	pushl_cfi $0
994	pushl_cfi $do_spurious_interrupt_bug
995	jmp error_code
996	CFI_ENDPROC
997END(spurious_interrupt_bug)
998/*
999 * End of kprobes section
1000 */
1001	.popsection
1002
1003ENTRY(kernel_thread_helper)
1004	pushl $0		# fake return address for unwinder
1005	CFI_STARTPROC
1006	movl %edi,%eax
1007	call *%esi
1008	call do_exit
1009	ud2			# padding for call trace
1010	CFI_ENDPROC
1011ENDPROC(kernel_thread_helper)
1012
1013#ifdef CONFIG_XEN
1014/* Xen doesn't set %esp to be precisely what the normal sysenter
1015   entrypoint expects, so fix it up before using the normal path. */
1016ENTRY(xen_sysenter_target)
1017	RING0_INT_FRAME
1018	addl $5*4, %esp		/* remove xen-provided frame */
1019	CFI_ADJUST_CFA_OFFSET -5*4
1020	jmp sysenter_past_esp
1021	CFI_ENDPROC
1022
1023ENTRY(xen_hypervisor_callback)
1024	CFI_STARTPROC
1025	pushl_cfi $0
1026	SAVE_ALL
1027	TRACE_IRQS_OFF
1028
1029	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've re-enabled
	   events and checked for pending events.  This simulates
	   the iret instruction's behaviour of delivering a
1033	   pending interrupt when enabling interrupts. */
1034	movl PT_EIP(%esp),%eax
1035	cmpl $xen_iret_start_crit,%eax
1036	jb   1f
1037	cmpl $xen_iret_end_crit,%eax
1038	jae  1f
1039
1040	jmp  xen_iret_crit_fixup
1041
1042ENTRY(xen_do_upcall)
10431:	mov %esp, %eax
1044	call xen_evtchn_do_upcall
1045	jmp  ret_from_intr
1046	CFI_ENDPROC
1047ENDPROC(xen_hypervisor_callback)
1048
1049# Hypervisor uses this for application faults while it executes.
1050# We get here for two reasons:
1051#  1. Fault while reloading DS, ES, FS or GS
1052#  2. Fault while executing IRET
1053# Category 1 we fix up by reattempting the load, and zeroing the segment
1054# register if the load fails.
1055# Category 2 we fix up by jumping to do_iret_error. We cannot use the
1056# normal Linux return path in this case because if we use the IRET hypercall
1057# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1058# We distinguish between categories by maintaining a status value in EAX.
1059ENTRY(xen_failsafe_callback)
1060	CFI_STARTPROC
1061	pushl_cfi %eax
1062	movl $1,%eax
10631:	mov 4(%esp),%ds
10642:	mov 8(%esp),%es
10653:	mov 12(%esp),%fs
10664:	mov 16(%esp),%gs
1067	testl %eax,%eax
1068	popl_cfi %eax
1069	lea 16(%esp),%esp
1070	CFI_ADJUST_CFA_OFFSET -16
1071	jz 5f
1072	addl $16,%esp
1073	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
10745:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
1075	SAVE_ALL
1076	jmp ret_from_exception
1077	CFI_ENDPROC
1078
1079.section .fixup,"ax"
10806:	xorl %eax,%eax
1081	movl %eax,4(%esp)
1082	jmp 1b
10837:	xorl %eax,%eax
1084	movl %eax,8(%esp)
1085	jmp 2b
10868:	xorl %eax,%eax
1087	movl %eax,12(%esp)
1088	jmp 3b
10899:	xorl %eax,%eax
1090	movl %eax,16(%esp)
1091	jmp 4b
1092.previous
1093.section __ex_table,"a"
1094	.align 4
1095	.long 1b,6b
1096	.long 2b,7b
1097	.long 3b,8b
1098	.long 4b,9b
1099.previous
1100ENDPROC(xen_failsafe_callback)
1101
1102BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
1103		xen_evtchn_do_upcall)
1104
1105#endif	/* CONFIG_XEN */
1106
1107#ifdef CONFIG_FUNCTION_TRACER
1108#ifdef CONFIG_DYNAMIC_FTRACE
1109
1110ENTRY(mcount)
1111	ret
1112END(mcount)
1113
1114ENTRY(ftrace_caller)
1115	cmpl $0, function_trace_stop
1116	jne  ftrace_stub
1117
1118	pushl %eax
1119	pushl %ecx
1120	pushl %edx
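	/*
	 * With %eax/%ecx/%edx saved (12 bytes), 0xc(%esp) is the address
	 * mcount was called from inside the traced function and 0x4(%ebp)
	 * is the traced function's own return address (this relies on
	 * frame pointers).  Subtracting MCOUNT_INSN_SIZE turns the former
	 * into the address of the mcount call site itself.
	 */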
1121	movl 0xc(%esp), %eax
1122	movl 0x4(%ebp), %edx
1123	subl $MCOUNT_INSN_SIZE, %eax
1124
1125.globl ftrace_call
1126ftrace_call:
1127	call ftrace_stub
1128
1129	popl %edx
1130	popl %ecx
1131	popl %eax
1132#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1133.globl ftrace_graph_call
1134ftrace_graph_call:
1135	jmp ftrace_stub
1136#endif
1137
1138.globl ftrace_stub
1139ftrace_stub:
1140	ret
1141END(ftrace_caller)
1142
1143#else /* ! CONFIG_DYNAMIC_FTRACE */
1144
1145ENTRY(mcount)
1146	cmpl $0, function_trace_stop
1147	jne  ftrace_stub
1148
1149	cmpl $ftrace_stub, ftrace_trace_function
1150	jnz trace
1151#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1152	cmpl $ftrace_stub, ftrace_graph_return
1153	jnz ftrace_graph_caller
1154
1155	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1156	jnz ftrace_graph_caller
1157#endif
1158.globl ftrace_stub
1159ftrace_stub:
1160	ret
1161
1162	/* taken from glibc */
1163trace:
1164	pushl %eax
1165	pushl %ecx
1166	pushl %edx
1167	movl 0xc(%esp), %eax
1168	movl 0x4(%ebp), %edx
1169	subl $MCOUNT_INSN_SIZE, %eax
1170
1171	call *ftrace_trace_function
1172
1173	popl %edx
1174	popl %ecx
1175	popl %eax
1176	jmp ftrace_stub
1177END(mcount)
1178#endif /* CONFIG_DYNAMIC_FTRACE */
1179#endif /* CONFIG_FUNCTION_TRACER */
1180
1181#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1182ENTRY(ftrace_graph_caller)
1183	cmpl $0, function_trace_stop
1184	jne ftrace_stub
1185
1186	pushl %eax
1187	pushl %ecx
1188	pushl %edx
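	/*
	 * Argument sketch, assuming the usual prepare_ftrace_return(parent,
	 * self_addr, frame_pointer) prototype and regparm(3): %eax = address
	 * of the parent return-address slot (4(%ebp)), %edx = address of the
	 * mcount call site, %ecx = the saved frame pointer at (%ebp).
	 */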
1189	movl 0xc(%esp), %edx
1190	lea 0x4(%ebp), %eax
1191	movl (%ebp), %ecx
1192	subl $MCOUNT_INSN_SIZE, %edx
1193	call prepare_ftrace_return
1194	popl %edx
1195	popl %ecx
1196	popl %eax
1197	ret
1198END(ftrace_graph_caller)
1199
1200.globl return_to_handler
1201return_to_handler:
1202	pushl %eax
1203	pushl %edx
1204	movl %ebp, %eax
1205	call ftrace_return_to_handler
1206	movl %eax, %ecx
1207	popl %edx
1208	popl %eax
1209	jmp *%ecx
1210#endif
1211
1212/*
1213 * Some functions should be protected against kprobes
1214 */
1215	.pushsection .kprobes.text, "ax"
1216
1217ENTRY(page_fault)
1218	RING0_EC_FRAME
1219	pushl_cfi $do_page_fault
1220	ALIGN
1221error_code:
1222	/* the function address is in %gs's slot on the stack */
1223	pushl_cfi %fs
1224	/*CFI_REL_OFFSET fs, 0*/
1225	pushl_cfi %es
1226	/*CFI_REL_OFFSET es, 0*/
1227	pushl_cfi %ds
1228	/*CFI_REL_OFFSET ds, 0*/
1229	pushl_cfi %eax
1230	CFI_REL_OFFSET eax, 0
1231	pushl_cfi %ebp
1232	CFI_REL_OFFSET ebp, 0
1233	pushl_cfi %edi
1234	CFI_REL_OFFSET edi, 0
1235	pushl_cfi %esi
1236	CFI_REL_OFFSET esi, 0
1237	pushl_cfi %edx
1238	CFI_REL_OFFSET edx, 0
1239	pushl_cfi %ecx
1240	CFI_REL_OFFSET ecx, 0
1241	pushl_cfi %ebx
1242	CFI_REL_OFFSET ebx, 0
1243	cld
1244	movl $(__KERNEL_PERCPU), %ecx
1245	movl %ecx, %fs
1246	UNWIND_ESPFIX_STACK
1247	GS_TO_REG %ecx
1248	movl PT_GS(%esp), %edi		# get the function address
1249	movl PT_ORIG_EAX(%esp), %edx	# get the error code
1250	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
1251	REG_TO_PTGS %ecx
1252	SET_KERNEL_GS %ecx
1253	movl $(__USER_DS), %ecx
1254	movl %ecx, %ds
1255	movl %ecx, %es
1256	TRACE_IRQS_OFF
1257	movl %esp,%eax			# pt_regs pointer
1258	call *%edi
1259	jmp ret_from_exception
1260	CFI_ENDPROC
1261END(page_fault)
1262
1263/*
1264 * Debug traps and NMI can happen at the one SYSENTER instruction
1265 * that sets up the real kernel stack. Check here, since we can't
1266 * allow the wrong stack to be used.
1267 *
1268 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1269 * already pushed 3 words if it hits on the sysenter instruction:
1270 * eflags, cs and eip.
1271 *
1272 * We just load the right stack, and push the three (known) values
1273 * by hand onto the new stack - while updating the return eip past
1274 * the instruction that would have done it for sysenter.
1275 */
1276.macro FIX_STACK offset ok label
1277	cmpw $__KERNEL_CS, 4(%esp)
1278	jne \ok
1279\label:
1280	movl TSS_sysenter_sp0 + \offset(%esp), %esp
1281	CFI_DEF_CFA esp, 0
1282	CFI_UNDEFINED eip
1283	pushfl_cfi
1284	pushl_cfi $__KERNEL_CS
1285	pushl_cfi $sysenter_past_esp
1286	CFI_REL_OFFSET eip, 0
1287.endm
1288
1289ENTRY(debug)
1290	RING0_INT_FRAME
1291	cmpl $ia32_sysenter_target,(%esp)
1292	jne debug_stack_correct
1293	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1294debug_stack_correct:
1295	pushl_cfi $-1			# mark this as an int
1296	SAVE_ALL
1297	TRACE_IRQS_OFF
1298	xorl %edx,%edx			# error code 0
1299	movl %esp,%eax			# pt_regs pointer
1300	call do_debug
1301	jmp ret_from_exception
1302	CFI_ENDPROC
1303END(debug)
1304
1305/*
1306 * NMI is doubly nasty. It can happen _while_ we're handling
1307 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
1309 * NMI on the sysenter entry path, but after that we need to
1310 * check whether we got an NMI on the debug path where the debug
1311 * fault happened on the sysenter path.
1312 */
1313ENTRY(nmi)
1314	RING0_INT_FRAME
1315	pushl_cfi %eax
1316	movl %ss, %eax
1317	cmpw $__ESPFIX_SS, %ax
1318	popl_cfi %eax
1319	je nmi_espfix_stack
1320	cmpl $ia32_sysenter_target,(%esp)
1321	je nmi_stack_fixup
1322	pushl_cfi %eax
1323	movl %esp,%eax
1324	/* Do not access memory above the end of our stack page,
1325	 * it might not exist.
1326	 */
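	/* (The 20 bytes of headroom cover the 12(%esp) and 16(%esp) words
	 * that nmi_debug_stack_check may examine after the popl below.) */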
1327	andl $(THREAD_SIZE-1),%eax
1328	cmpl $(THREAD_SIZE-20),%eax
1329	popl_cfi %eax
1330	jae nmi_stack_correct
1331	cmpl $ia32_sysenter_target,12(%esp)
1332	je nmi_debug_stack_check
1333nmi_stack_correct:
1334	/* We have a RING0_INT_FRAME here */
1335	pushl_cfi %eax
1336	SAVE_ALL
1337	xorl %edx,%edx		# zero error code
1338	movl %esp,%eax		# pt_regs pointer
1339	call do_nmi
1340	jmp restore_all_notrace
1341	CFI_ENDPROC
1342
1343nmi_stack_fixup:
1344	RING0_INT_FRAME
1345	FIX_STACK 12, nmi_stack_correct, 1
1346	jmp nmi_stack_correct
1347
1348nmi_debug_stack_check:
1349	/* We have a RING0_INT_FRAME here */
1350	cmpw $__KERNEL_CS,16(%esp)
1351	jne nmi_stack_correct
1352	cmpl $debug,(%esp)
1353	jb nmi_stack_correct
1354	cmpl $debug_esp_fix_insn,(%esp)
1355	ja nmi_stack_correct
1356	FIX_STACK 24, nmi_stack_correct, 1
1357	jmp nmi_stack_correct
1358
1359nmi_espfix_stack:
1360	/* We have a RING0_INT_FRAME here.
1361	 *
	 * create the %ss:%esp pointer that we will lss back to
1363	 */
1364	pushl_cfi %ss
1365	pushl_cfi %esp
1366	addl $4, (%esp)
1367	/* copy the iret frame of 12 bytes */
1368	.rept 3
1369	pushl_cfi 16(%esp)
1370	.endr
1371	pushl_cfi %eax
1372	SAVE_ALL
1373	FIXUP_ESPFIX_STACK		# %eax == %esp
1374	xorl %edx,%edx			# zero error code
1375	call do_nmi
1376	RESTORE_REGS
1377	lss 12+4(%esp), %esp		# back to espfix stack
1378	CFI_ADJUST_CFA_OFFSET -24
1379	jmp irq_return
1380	CFI_ENDPROC
1381END(nmi)
1382
1383ENTRY(int3)
1384	RING0_INT_FRAME
1385	pushl_cfi $-1			# mark this as an int
1386	SAVE_ALL
1387	TRACE_IRQS_OFF
1388	xorl %edx,%edx		# zero error code
1389	movl %esp,%eax		# pt_regs pointer
1390	call do_int3
1391	jmp ret_from_exception
1392	CFI_ENDPROC
1393END(int3)
1394
1395ENTRY(general_protection)
1396	RING0_EC_FRAME
1397	pushl_cfi $do_general_protection
1398	jmp error_code
1399	CFI_ENDPROC
1400END(general_protection)
1401
1402#ifdef CONFIG_KVM_GUEST
1403ENTRY(async_page_fault)
1404	RING0_EC_FRAME
1405	pushl_cfi $do_async_page_fault
1406	jmp error_code
1407	CFI_ENDPROC
1408END(async_page_fault)
1409#endif
1410
1411/*
1412 * End of kprobes section
1413 */
1414	.popsection
1415