Lines Matching +full:a +full:- +full:8
1 /* SPDX-License-Identifier: GPL-2.0 */
9 * entry.S contains the system-call and fault low-level handling routines.
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
26 #include <asm/asm-offsets.h>
40 #include <asm/nospec-branch.h>
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
52 * This is the only entry point used for 64-bit system calls. The
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
71 * r11 saved rflags (note: r11 is a callee-clobbered register in C ABI)
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
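For illustration, a minimal userspace sketch of this register contract (the wrapper name my_syscall3 is hypothetical): rcx and r11 must appear in the clobber list because SYSCALL overwrites them with the return RIP and RFLAGS, while the callee-preserved set survives untouched.

    /* sketch: a raw 64-bit syscall from C, honoring the convention above */
    static long my_syscall3(long nr, long a1, long a2, long a3)
    {
        long ret;
        asm volatile("syscall"
                     : "=a" (ret)                              /* rax: return value */
                     : "a" (nr), "D" (a1), "S" (a2), "d" (a3)  /* rax, rdi, rsi, rdx */
                     : "rcx", "r11", "memory");                /* clobbered by SYSCALL */
        return ret;
    }

    int main(void)
    {
        my_syscall3(1 /* __NR_write */, 1, (long)"hi\n", 3);
        return 0;
    }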
82 * When user can change pt_regs->foo always force IRET. That is because
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
107 pushq %rax /* pt_regs->orig_ax */
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
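These pushes hand-build the bottom of the hardware iret frame plus orig_ax; a sketch of the resulting layout, lowest address first, i.e. reverse push order (the struct name is illustrative, the field order matches the tail of the kernel's struct pt_regs):

    /* sketch: memory layout left by the pushes above, lowest address first */
    struct syscall_frame_tail {              /* hypothetical name */
        unsigned long orig_rax;              /* pushq %rax  - syscall number */
        unsigned long rip;                   /* pushq %rcx  - user RIP saved by SYSCALL */
        unsigned long cs;                    /* pushq $__USER_CS */
        unsigned long rflags;                /* pushq %r11  - user RFLAGS saved by SYSCALL */
        unsigned long rsp;                   /* pushq ...TSS_sp2 - user RSP stashed at entry */
        unsigned long ss;                    /* pushq $__USER_DS */
    };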
124 * a completely clean 64-bit userspace context. If we're not,
148 pushq RSP-RDI(%rdi) /* RSP */
178 * Save callee-saved registers
198 * When switching from a shallower to a deeper call stack
206 /* restore callee-saved registers */
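Only the C-ABI callee-saved registers need saving here, because the compiler already treats everything else as clobbered across the call into the context switch; a sketch of the saved frame (field set is illustrative; compare the kernel's struct inactive_task_frame):

    /* sketch: the per-task state the context switch preserves by hand */
    struct switch_frame {                    /* hypothetical name */
        unsigned long r15, r14, r13, r12;    /* callee-saved GPRs */
        unsigned long rbx, rbp;              /* likewise callee-saved */
        unsigned long ret_addr;              /* where the switched-to task resumes */
    };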
219 * A newly forked process directly context switches into this address.
228 * This is the start of the kernel stack; even though there's a
232 * This ensures stack unwinds of kernel threads terminate in a known
247 * -- at this point the register set should be a valid user set
271 ENCODE_FRAME_POINTER 8
277 * idtentry_body - Macro to emit code calling the C function
301 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
306 /* For some configurations \cfunc ends up being a noreturn. */
313 * idtentry - Macro to generate entry stubs for simple IDT entries
327 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
329 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
337 pushq $-1 /* ORIG_RAX: no syscall to restart */
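The -1 written to ORIG_RAX is a sentinel consumed later by signal delivery: only frames whose orig_ax holds a real syscall number are candidates for syscall restart. Roughly (illustrative, not the kernel's exact code):

    /* sketch: what the -1 sentinel means downstream */
    static int entered_via_syscall(long orig_ax)
    {
        return orig_ax != -1;        /* exceptions/interrupts store -1 here */
    }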
342 * If coming from kernel space, create a 6-word gap to allow the
343 * int3 handler to emulate a call instruction.
345 testb $3, CS-ORIG_RAX(%rsp)
348 pushq 5*8(%rsp)
350 UNWIND_HINT_IRET_REGS offset=8
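The six-word gap gives the #BP handler room to synthesize a call frame in place, roughly as below (minimal stand-ins; compare the kernel's int3_emulate_call() helper):

    /* sketch: emulating "call func" from the int3 handler needs stack room,
     * which the gap created above provides */
    struct fake_regs { unsigned long ip, sp; };    /* stand-in for pt_regs */

    static void emulate_call(struct fake_regs *regs, unsigned long func)
    {
        regs->sp -= 8;                             /* descend into the gap */
        *(unsigned long *)regs->sp = regs->ip;     /* push the return address */
        regs->ip = func;                           /* redirect to the call target */
    }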
367 * common_interrupt is a hotpath; align it to a cache line
383 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
405 pushq $-1 /* ORIG_RAX: no syscall to restart */
409 * a normal entry.
411 testb $3, CS-ORIG_RAX(%rsp)
435 * idtentry_vc - Macro to generate entry stub for #VC
444 * an IST stack by switching to the task stack if coming from user-space (which
446 * entered from kernel-mode.
448 * If entered from kernel-mode the return stack is validated first, and if it is
450 * will switch to a fall-back stack (VC2) and call a special handler function.
464 * a normal entry.
466 testb $3, CS-ORIG_RAX(%rsp)
471 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
480 * stack if it is safe to do so. If not, it switches to the VC fall-back
492 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
500 * identical to the stack in the IRET frame or the VC fall-back stack,
521 UNWIND_HINT_IRET_ENTRY offset=8
532 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
535 /* For some configurations \cfunc ends up being a noreturn. */
572 add $8, %rsp /* orig_ax */
579 testb $3, 8(%rsp)
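This "testb $3" pattern recurs throughout the file: the low two bits of the saved CS hold the privilege level of the interrupted context, so a nonzero result means the entry came from ring 3. In C terms:

    /* sketch: the check encoded by "testb $3, CS(%rsp)" */
    static int came_from_user_mode(unsigned long saved_cs)
    {
        return (saved_cs & 3) != 0;    /* CPL 3 lives in the selector's low bits */
    }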
596 pushq 6*8(%rdi) /* SS */
597 pushq 5*8(%rdi) /* RSP */
598 pushq 4*8(%rdi) /* EFLAGS */
599 pushq 3*8(%rdi) /* CS */
600 pushq 2*8(%rdi) /* RIP */
629 addq $8, %rsp /* skip regs->orig_ax */
638 .long .Lnative_iret - (. + 4)
644 * Are we returning to a stack segment from the LDT? Note: in
645 * 64-bit mode SS:RSP on the exception stack is always valid.
648 testb $4, (SS-RIP)(%rsp)
655 * This may fault. Non-paranoid faults on return to userspace are
657 * Double-faults due to espfix64 are handled in exc_double_fault.
666 * values. We have a percpu ESPFIX stack that is eight slots
675 * --- top of ESPFIX stack ---
680 * RIP <-- RSP points here when we're done
681 * RAX <-- espfix_waddr points here
682 * --- bottom of ESPFIX stack ---
690 movq %rax, (0*8)(%rdi) /* user RAX */
691 movq (1*8)(%rsp), %rax /* user RIP */
692 movq %rax, (1*8)(%rdi)
693 movq (2*8)(%rsp), %rax /* user CS */
694 movq %rax, (2*8)(%rdi)
695 movq (3*8)(%rsp), %rax /* user RFLAGS */
696 movq %rax, (3*8)(%rdi)
697 movq (5*8)(%rsp), %rax /* user SS */
698 movq %rax, (5*8)(%rdi)
699 movq (4*8)(%rsp), %rax /* user RSP */
700 movq %rax, (4*8)(%rdi)
707 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
720 UNWIND_HINT_IRET_REGS offset=8
732 * is read-only and RSP[31:16] are preloaded with the userspace
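The comment above gives the espfix64 address arithmetic directly; as a checkable sketch (espfix_base stands in for the per-CPU espfix_stack value, stack_va for X):

    /* sketch: the espfix64 aliasing arithmetic described above */
    static unsigned long espfix_alias(unsigned long espfix_base,
                                      unsigned long stack_va)
    {
        return espfix_base | (stack_va & 0xffff0000UL);
    }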
761 /* This can't be a string because the preprocessor needs to see it. */
777 * A note on the "critical region" in our callback handler.
786 * existing activation in its critical region -- if so, we pop the current
838 movq 8(%rsp), %r11
841 UNWIND_HINT_IRET_REGS offset=8
845 movq 8(%rsp), %r11
848 pushq $-1 /* orig_ax = -1 => not a system call */
860 * FSGSBASE=N: EBX = 0 -> SWAPGS on exit
861 *             EBX = 1 -> no SWAPGS on exit
865 * R14 - old CR3
866 * R15 - old SPEC_CTRL
872 ENCODE_FRAME_POINTER 8
881 * hardware at entry) cannot be used: this may be a return
882 * to kernel code, but with a user CR3 value.
887 * be retrieved from a kernel internal table.
907 * loads based on a mispredicted GS base can happen, therefore no LFENCE
914 /* EBX = 1 -> kernel GSBASE active, no restore required */
918 * The kernel-enforced convention is that a negative GSBASE indicates
919 * a kernel value. No SWAPGS needed on entry and exit.
926 /* EBX = 0 -> SWAPGS required on exit */
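With FSGSBASE the entry code can read GSBASE directly and classify it by sign, since kernel addresses occupy the upper canonical half of the address space. A sketch of that test:

    /* sketch: the sign test behind the convention above */
    static int gsbase_is_kernel(unsigned long gsbase)
    {
        return (long)gsbase < 0;    /* top bit set => kernel-half address */
    }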
935 * CR3 above, keep the old value in a callee saved register.
945 * only on return from non-NMI IST interrupts that came
957 * FSGSBASE=N: EBX = 0 -> SWAPGS on exit
958 *             EBX = 1 -> no SWAPGS on exit
962 * R14 - old CR3
963 * R15 - old SPEC_CTRL
970 * to the per-CPU x86_spec_ctrl_shadow variable.
992 /* On non-FSGSBASE systems, conditionally do SWAPGS */
996 /* We are returning to a context with user GSBASE */
1009 ENCODE_FRAME_POINTER 8
1011 testb $3, CS+8(%rsp)
1025 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1031 * usergs. Handle them here. B stepping K8s sometimes report a
1037 cmpq %rcx, RIP+8(%rsp)
1040 cmpq %rax, RIP+8(%rsp)
1042 cmpq $.Lgs_change, RIP+8(%rsp)
1053 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1059 leaq 8(%rsp), %rax /* return pt_regs pointer */
1065 movq %rcx, RIP+8(%rsp)
1083 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1110 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1115 * stack of the previous NMI. NMI handlers are not re-entrant
1119 * Check a special location on the stack that contains a
1137 * a nested NMI that updated the copy interrupt stack frame, a
1142 * with a single IRET instruction. Similarly, IRET to user mode
1153 testb $3, CS-RIP+8(%rsp)
1172 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1173 pushq 5*8(%rdx) /* pt_regs->ss */
1174 pushq 4*8(%rdx) /* pt_regs->rsp */
1175 pushq 3*8(%rdx) /* pt_regs->flags */
1176 pushq 2*8(%rdx) /* pt_regs->cs */
1177 pushq 1*8(%rdx) /* pt_regs->rip */
1179 pushq $-1 /* pt_regs->orig_ax */
1188 * due to nesting -- we're on the normal thread stack and we're
1204 * (NMI stack-layout diagram: "original" hardware frame, temp rdx slot, "NMI executing" variable, "iret" frame, "outermost" frame, pt_regs)
1217 * | iret RFLAGS } by a nested NMI to force another |
1230 * The "original" frame is used by hardware. Before re-enabling
1244 * Determine whether we're a nested NMI.
1247 * end_repeat_nmi, then we are a nested NMI. We must not
1255 cmpq 8(%rsp), %rdx
1258 cmpq 8(%rsp), %rdx
1267 cmpl $1, -8(%rsp)
1277 * pull a fast one on naughty userspace, though: we program
1282 lea 6*8(%rsp), %rdx
1283 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1284 cmpq %rdx, 4*8(%rsp)
1285 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1289 cmpq %rdx, 4*8(%rsp)
1290 /* If it is below the NMI stack, it is a normal NMI */
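The two compares implement a range check on the interrupted RSP: a nested NMI is one whose previous stack pointer lies inside the NMI IST stack. In C terms (names and the explicit size parameter are illustrative):

    /* sketch: the range check performed by the cmpq pair above */
    static int prev_rsp_on_nmi_stack(unsigned long prev_rsp,
                                     unsigned long nmi_stack_end,
                                     unsigned long nmi_stack_size)
    {
        return prev_rsp <= nmi_stack_end &&
               prev_rsp >= nmi_stack_end - nmi_stack_size;
    }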
1295 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1298 /* This is a nested NMI. */
1305 subq $8, %rsp
1306 leaq -10*8(%rsp), %rdx
1314 addq $(6*8), %rsp
1319 /* We are returning to kernel mode, so this cannot result in a fault. */
1330 subq $(5*8), %rsp
1334 pushq 11*8(%rsp)
1346 pushq %rsp /* RSP (minus 8 because of the previous push) */
1347 addq $8, (%rsp) /* Fix up RSP */
1359 * If there was a nested NMI, the first NMI's iret will return
1364 * This makes it safe to copy to the stack frame that a nested
1373 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1380 addq $(10*8), %rsp
1382 pushq -6*8(%rsp)
1384 subq $(5*8), %rsp
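The push/sub sequence above is just a five-word copy: the "outermost" frame is replayed into the "iret" frame so the eventual IRET restarts the handler if another NMI arrived meanwhile. Equivalent C (pointers are illustrative):

    /* sketch: repeat_nmi refreshing the "iret" frame from the "outermost" copy */
    static void refresh_iret_frame(unsigned long *iret_frame,
                                   const unsigned long *outermost_frame)
    {
        for (int i = 0; i < 5; i++)        /* RIP, CS, RFLAGS, RSP, SS */
            iret_frame[i] = outermost_frame[i];
    }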
1389 * Everything below this point can be preempted by a nested NMI.
1393 pushq $-1 /* ORIG_RAX: no syscall to restart */
1428 /* EBX == 0 -> invoke SWAPGS */
1442 addq $6*8, %rsp
1454 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1463 * iretq reads the "iret" frame and exits the NMI stack in a
1465 * cannot result in a fault. Similarly, we don't need to worry
1472 * This handles SYSCALL from 32-bit code. There is no way to program
1473 * MSRs to fully disable 32-bit SYSCALL.
1478 mov $-ENOSYS, %eax
1491 leaq -PTREGS_SIZE(%rax), %rsp