Lines matching full:esp (arch/x86/entry/entry_32.S)
13 * 0(%esp) - %ebx
14 * 4(%esp) - %ecx
15 * 8(%esp) - %edx
16 * C(%esp) - %esi
17 * 10(%esp) - %edi
18 * 14(%esp) - %ebp
19 * 18(%esp) - %eax
20 * 1C(%esp) - %ds
21 * 20(%esp) - %es
22 * 24(%esp) - %fs
23 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
24 * 2C(%esp) - orig_eax
25 * 30(%esp) - %eip
26 * 34(%esp) - %cs
27 * 38(%esp) - %eflags
28 * 3C(%esp) - %oldesp
29 * 40(%esp) - %oldss
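
The offsets above are exactly the 32-bit struct pt_regs layout. As a reading aid, here is the same layout as a C struct; field names follow the kernel's arch/x86/include/asm/ptrace.h, but the struct name is changed to mark it as a sketch rather than the kernel's definition:

    /* Sketch of the stack layout above: one 4-byte word per slot. */
    struct pt_regs_sketch {
        unsigned long bx, cx, dx, si, di, bp, ax;  /* 0x00 .. 0x18 */
        unsigned long ds, es, fs;                  /* 0x1C .. 0x24 */
        unsigned long gs;        /* 0x28, saved iff !CONFIG_X86_32_LAZY_GS */
        unsigned long orig_ax;   /* 0x2C */
        unsigned long ip, cs, flags;               /* 0x30 .. 0x38 */
        unsigned long sp, ss;    /* 0x3C, 0x40: the "oldesp"/"oldss" slots */
    };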
72 addl $(4 + \pop), %esp
98 add $\pop, %esp
103 99: movl $0, (%esp)
110 98: mov PT_GS(%esp), %gs
114 99: movl $0, PT_GS(%esp)
124 movl \reg, PT_GS(%esp)
148 testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
188 andl $0x0000ffff, 4*4(%esp)
191 testl $X86_EFLAGS_VM, 5*4(%esp)
194 testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
197 orl $CS_FROM_KERNEL, 4*4(%esp)
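
Hardware only writes 16 bits of CS into the stacked dword and leaves the upper word undefined, which is why line 188 scrubs it with andl $0x0000ffff before line 197 plants CS_FROM_KERNEL there. The marker values below match the #defines at the top of entry_32.S in kernels of this vintage; the helper is a hypothetical C rendering of the testl/orl/andl pattern that recurs in the matches further down:

    /* High-word marker bits parked in the saved CS slot (values as
     * defined in entry_32.S of this era). */
    #define CS_FROM_ENTRY_STACK  (1U << 31)
    #define CS_FROM_USER_CR3     (1U << 30)
    #define CS_FROM_KERNEL       (1U << 29)
    #define CS_FROM_ESPFIX       (1U << 28)

    /* Hypothetical helper mirroring "testl $CS_FROM_x, PT_CS(%esp)"
     * followed by "andl $(~CS_FROM_x), PT_CS(%esp)". */
    static inline int test_and_clear_cs_flag(unsigned long *saved_cs,
                                             unsigned long flag)
    {
        int was_set = (*saved_cs & flag) != 0;
        *saved_cs &= ~flag;
        return was_set;
    }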
202 * 6*4(%esp) - <previous context>
203 * 5*4(%esp) - flags
204 * 4*4(%esp) - cs
205 * 3*4(%esp) - ip
206 * 2*4(%esp) - orig_eax
207 * 1*4(%esp) - gs / function
208 * 0*4(%esp) - fs
214 * 14*4(%esp) - <previous context>
215 * 13*4(%esp) - gap / flags
216 * 12*4(%esp) - gap / cs
217 * 11*4(%esp) - gap / ip
218 * 10*4(%esp) - gap / orig_eax
219 * 9*4(%esp) - gap / gs / function
220 * 8*4(%esp) - gap / fs
221 * 7*4(%esp) - ss
222 * 6*4(%esp) - sp
223 * 5*4(%esp) - flags
224 * 4*4(%esp) - cs
225 * 3*4(%esp) - ip
226 * 2*4(%esp) - orig_eax
227 * 1*4(%esp) - gs / function
228 * 0*4(%esp) - fs
232 pushl %esp # sp (points at ss)
233 addl $7*4, (%esp) # point sp back at the previous context
234 pushl 7*4(%esp) # flags
235 pushl 7*4(%esp) # cs
236 pushl 7*4(%esp) # ip
237 pushl 7*4(%esp) # orig_eax
238 pushl 7*4(%esp) # gs / function
239 pushl 7*4(%esp) # fs
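
The push sequence at 232-239 (preceded by a pushl %ss, which contains no "esp" and is therefore absent from the matches) realizes the second diagram: it appends ss and a corrected sp, then re-pushes the six original words, leaving the old copies behind as the gap. A hypothetical C model of the pointer arithmetic, with 'sp' initially at the fs slot:

    /* Hypothetical model of FIXUP_FRAME's copy.  The previous context
     * sits 6 words above the incoming 'sp'. */
    unsigned long *fixup_frame_model(unsigned long *sp, unsigned long ss)
    {
        sp--; *sp = ss;                       /* pushl %ss              */
        sp--; *sp = (unsigned long)(sp + 8);  /* pushl %esp; addl $7*4  */

        /* pushl 7*4(%esp) six times: flags, cs, ip, orig_eax, gs, fs.
         * The operand is taken 7 words above the pre-push %esp, which
         * is 8 words above the just-decremented pointer. */
        for (int i = 0; i < 6; i++) {
            sp--;
            *sp = sp[8];
        }
        return sp;  /* complete pt_regs tail; old words remain as gap */
    }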
247 * mode and therefore have a nonzero SS base and an offset ESP,
249 * accesses through %esp, which automatically use SS.)
251 testl $CS_FROM_KERNEL, 1*4(%esp)
256 * regs->sp without lowering %esp in between, such that an NMI in the
261 movl 5*4(%esp), %eax # (modified) regs->sp
263 movl 4*4(%esp), %ecx # flags
266 movl 3*4(%esp), %ecx # cs
270 movl 2*4(%esp), %ecx # ip
273 movl 1*4(%esp), %ecx # eax
277 lea -4*4(%eax), %esp
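
Lines 256-277 rebuild the hardware IRET frame just below the possibly-updated regs->sp and only then move %esp, so an NMI arriving mid-sequence cannot land on the words being written. A hypothetical C model follows; the store of the saved %eax to -4*4(%eax) happens between lines 273 and 277 but contains no "esp", so it is not among the matches:

    /* Hypothetical model of the IRET-frame rebuild for returns to
     * kernel mode with a modified regs->sp. */
    unsigned long *iret_frame_model(unsigned long *regs_sp,
                                    unsigned long flags, unsigned long cs,
                                    unsigned long ip, unsigned long eax)
    {
        regs_sp[-1] = flags;
        regs_sp[-2] = cs & 0xffff;   /* CS_FROM_KERNEL marker stripped */
        regs_sp[-3] = ip;
        regs_sp[-4] = eax;           /* popped right after the switch  */
        return regs_sp - 4;          /* lea -4*4(%eax), %esp           */
    }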
354 4: movl $0, (%esp)
356 5: movl $0, (%esp)
358 6: movl $0, (%esp)
396 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
398 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
402 movb PT_OLDSS(%esp), %ah
403 movb PT_CS(%esp), %al
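
Lines 396-403 pack three facts into one register so that a single masked compare (the andl/cmpl that follow contain no "esp" and are not matched) can decide whether we are headed back to user space on an LDT stack segment, the case espfix exists for: %al carries the CS RPL bits, %ah the SS TI bit, and EFLAGS.VM (bit 17) survives above them. This is also why the wrong/random PT_OLDSS noted at 398 is harmless for in-kernel returns: a kernel CS already fails the RPL test. A hedged C equivalent, using the standard <asm/segment.h> constants:

    /* Sketch of the combined check: SEGMENT_TI_MASK = 0x4,
     * SEGMENT_RPL_MASK = 0x3, SEGMENT_LDT = 0x4, USER_RPL = 3,
     * X86_EFLAGS_VM = 1 << 17. */
    int needs_espfix(unsigned long eflags, unsigned char ss_lo,
                     unsigned char cs_lo)
    {
        unsigned long mix = (eflags & (1UL << 17)) |   /* must be clear */
                            ((unsigned long)(ss_lo & 0x4) << 8) |
                            (cs_lo & 0x3);
        return mix == ((0x4UL << 8) | 3);  /* LDT stack, CPL 3, no vm86 */
    }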
412 * restore the high word of ESP for us on executing iret... This is an
415 * high word of ESP with the high word of the userspace ESP while
419 mov %esp, %edx /* load kernel esp */
420 mov PT_OLDESP(%esp), %eax /* load userspace esp */
421 mov %dx, %ax /* eax: new kernel esp */
427 pushl %eax /* new kernel esp */
434 lss (%esp), %esp /* switch to espfix segment */
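
The arithmetic at 419-421 is the whole trick: keep the low word of the kernel ESP, so the frame just built stays addressable, but preload the high word of the userspace ESP; the ESPFIX segment installed by the lss at 434 has a base chosen to compensate for the difference, so iret's failure to restore the high 16 bits of ESP on a 16-bit stack no longer matters. As a one-line sketch:

    /* Sketch of the value built by "mov %esp, %edx; mov PT_OLDESP(%esp),
     * %eax; mov %dx, %ax" before the lss switch. */
    unsigned long espfix_esp(unsigned long kernel_esp, unsigned long user_esp)
    {
        return (user_esp & 0xffff0000UL) |    /* high word: userspace */
               (kernel_esp & 0x0000ffffUL);   /* low word: kernel     */
    }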
443 * We need to be very careful here with the %esp switch, because an NMI
464 subl %esp, %ecx /* ecx = (end of entry_stack) - esp */
469 movl %esp, %esi
481 movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
482 movb PT_CS(%esp), %cl
485 movl PT_CS(%esp), %ecx
511 movl %edi, %esp
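
Lines 443-511 are the entry-stack to task-stack switch on kernel entry: 464 measures how much of the entry stack is in use, and if we are indeed running on it, the live region is copied to the task stack before %esp is retargeted at 511. A loose, hypothetical C model of the copy (the real code computes the destination from the TSS and treats vm86 frames specially):

    #include <string.h>

    /* Hypothetical model: replicate the live stack region and move the
     * stack pointer to the copy. */
    unsigned long *switch_stack_model(unsigned long *esp,
                                      char *src_stack_end,
                                      char *dst_stack_top)
    {
        size_t bytes = src_stack_end - (char *)esp;  /* subl %esp, %ecx   */
        char *dst = dst_stack_top - bytes;
        memcpy(dst, esp, bytes);                     /* the rep-movs copy */
        return (unsigned long *)dst;                 /* movl %edi, %esp   */
    }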
527 * kernel-mode and %esp points to the entry-stack. When this
548 * %esi: Entry-Stack pointer (same as %esp)
564 orl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
572 orl $CS_FROM_USER_CR3, PT_CS(%esp)
587 * The %esp register must point to pt_regs on the task stack. It will
604 testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
616 movl %esp, %esi
631 movl %ebx, %esp
650 testl $CS_FROM_ENTRY_STACK, PT_CS(%esp)
656 andl $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
659 movl %esp, %esi
682 movl %ebx, %esp
688 testl $CS_FROM_USER_CR3, PT_CS(%esp)
692 andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
728 movl %esp, %eax
729 movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */
730 movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */
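
The $-1 store at 730 (and again at 1165 below) implements the "no syscall to restart" convention: signal delivery inspects the saved orig_eax to decide whether an interrupted syscall should be restarted, and a negative value takes that path off the table. Roughly, and simplifying the actual signal-code logic:

    /* Sketch of the convention: only frames whose orig_ax is
     * non-negative are syscall frames eligible for restart. */
    int syscall_may_restart(long orig_ax)
    {
        return orig_ax >= 0;
    }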
777 movl %esp, TASK_threadsp(%eax)
778 movl TASK_threadsp(%edx), %esp
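
The pair at 777/778 is the heart of the context switch: the outgoing task's %esp is parked in its task_struct (TASK_threadsp is the asm-offsets name for task_struct->thread.sp) and the incoming task's saved value is loaded, so the subsequent return pops frames from the new task's stack. In C-like form, with a hypothetical struct standing in for task_struct:

    /* Hypothetical model of the %esp exchange in __switch_to_asm. */
    struct task_sketch {
        unsigned long thread_sp;       /* task_struct->thread.sp */
    };

    void switch_sp_model(struct task_sketch *prev, struct task_sketch *next,
                         unsigned long *esp)
    {
        prev->thread_sp = *esp;        /* movl %esp, TASK_threadsp(%eax) */
        *esp = next->thread_sp;        /* movl TASK_threadsp(%edx), %esp */
    }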
844 movl %esp, %eax
856 movl $0, PT_EAX(%esp)
884 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
887 * and does not save old EIP (!!!), ESP, or EFLAGS.
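
The comment at 884-887 is the crux of the SYSENTER path: the CPU fetches SS, ESP, CS and EIP from MSRs and saves none of the old context, so the entry code must recover the user state itself (hence the TSS_entry2task_stack dance at 917). The MSRs involved are architectural; SS is implied as the IA32_SYSENTER_CS selector plus 8. The wrmsr helper below is a generic sketch, not the kernel's:

    #define MSR_IA32_SYSENTER_CS   0x174   /* new CS; SS = CS + 8 */
    #define MSR_IA32_SYSENTER_ESP  0x175   /* new ESP             */
    #define MSR_IA32_SYSENTER_EIP  0x176   /* new EIP             */

    /* Generic sketch of programming one of them with a 32-bit value. */
    static inline void wrmsr32(unsigned int msr, unsigned int val)
    {
        asm volatile("wrmsr" :: "c" (msr), "a" (val), "d" (0));
    }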
917 movl TSS_entry2task_stack(%esp), %esp
947 testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
951 movl %esp, %eax
970 movl PT_EFLAGS(%esp), %edi
971 movl PT_EAX(%esp), %esi
976 movl PT_EIP(%esp), %edx /* pt_regs->ip */
977 movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
978 1: mov PT_FS(%esp), %fs
982 addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
988 movl %eax, %esp
998 btrl $X86_EFLAGS_IF_BIT, (%esp)
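
The register choreography at 976-977 follows SYSEXIT's fixed contract: the CPU loads EIP from %edx and ESP from %ecx, which is why pt_regs->ip and pt_regs->sp land in exactly those registers. The btrl at 998 clears IF in the flags word that a later popfl will load, so interrupts stay masked until the sti issued immediately before sysexit; sti's one-instruction interrupt shadow then covers sysexit itself. A sketch of the flags edit:

    /* Sketch of "btrl $X86_EFLAGS_IF_BIT, (%esp)": drop IF (bit 9)
     * from the to-be-popped EFLAGS image. */
    unsigned long mask_if(unsigned long flags)
    {
        return flags & ~(1UL << 9);
    }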
1011 2: movl $0, PT_FS(%esp)
1058 movl %esp, %eax
1111 * normal stack and adjusts ESP with the matching offset.
1120 subl $2*4, %esp
1121 sgdt (%esp)
1122 movl 2(%esp), %ecx /* GDT address */
1130 addl $2*4, %esp
1132 addl %esp, %eax /* the adjusted stack pointer */
1135 lss (%esp), %esp /* switch to the normal stack segment */
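
Lines 1111-1135 switch back from the espfix stack to the normal zero-based stack: sgdt locates the GDT (1121-1122), the upper base bytes of the espfix descriptor are read out of it, and the base is added to %esp (1132) before the lss at 1135 reloads SS:ESP. The code only fetches the high base bytes because the espfix base's low word is zero by construction; the sketch below decodes the full base from the standard (architectural, not kernel-specific) 8-byte descriptor layout:

    #include <stdint.h>

    /* A segment descriptor keeps its base in bytes 2, 3, 4 and 7;
     * adding the decoded base to the truncated ESP yields the linear
     * stack pointer the flat kernel segments expect. */
    uint32_t descriptor_base(const uint8_t d[8])
    {
        return  (uint32_t)d[2]        |
               ((uint32_t)d[3] << 8)  |
               ((uint32_t)d[4] << 16) |
               ((uint32_t)d[7] << 24);
    }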
1159 movl PT_GS(%esp), %edi # get the function address
1164 movl PT_ORIG_EAX(%esp), %edx # get the error code
1165 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1167 movl %esp, %eax # pt_regs pointer
1172 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
1173 movb PT_CS(%esp), %al
1179 movl PT_CS(%esp), %eax
1191 movl %esp, %eax
1261 movl %esp, %eax # pt_regs pointer
1266 subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
1279 movl %esp, %ebx
1280 movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
1282 movl %ebx, %esp
1286 testl $CS_FROM_ESPFIX, PT_CS(%esp)
1300 pushl %esp
1301 addl $4, (%esp)
1304 pushl 4*4(%esp) # flags
1305 pushl 4*4(%esp) # cs
1306 pushl 4*4(%esp) # ip
1314 xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1317 movl %esp, %eax # pt_regs pointer
1332 lss (1+5+6)*4(%esp), %esp # back to espfix stack
1343 leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp