Lines Matching +full:inter +full:- +full:ic
1 /* SPDX-License-Identifier: GPL-2.0 */
7 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 * Copyright (C) 1999, 2002-2003
46 #include <asm/asm-offsets.h>
76 sxt4 r8=r8 // return 64-bit result
85 * security-sensitive state (e.g., if current->mm->dumpable is zero). However,
148 mov out2=16 // stacksize (compensates for 16-byte scratch area)
161 * prev_task <- ia64_switch_to(struct task_struct *next)
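For context, a minimal C-level sketch of how this entry point is consumed. The prototype follows the signature in the comment above; the switch_to() wrapper shape is an assumption for illustration only:

    struct task_struct;

    /* Hand-written stub documented above: switches the stack and register
     * backing store to `next` and hands back the task that was running
     * before the switch.
     */
    extern struct task_struct *ia64_switch_to(struct task_struct *next);

    /* Illustrative wrapper (assumption): the scheduler wants the
     * previously running task back as `last` once the stacks change.
     */
    #define switch_to(prev, next, last) \
            do { (last) = ia64_switch_to((next)); } while (0)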
216 SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit
227 * specified at the call-site of save_switch_stack.
232 * - r16 holds ar.pfs
233 * - b7 holds address to return to
234 * - rp (b0) holds return address to save
262 st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
273 st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
275 add r2=SW(F2)+16,sp // r2 = &sw->f2
277 st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
279 add r3=SW(F3)+16,sp // r3 = &sw->f3
286 st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
297 st8 [r14]=r21,SW(B1)-SW(B0) // save b0
298 st8 [r15]=r23,SW(B3)-SW(B2) // save b2
302 st8 [r14]=r22,SW(B4)-SW(B1) // save b1
303 st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
304 mov r21=ar.lc // I-unit
308 st8 [r14]=r25,SW(B5)-SW(B4) // save b4
309 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
336 stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
337 stf.spill [r3]=f31,SW(PR)-SW(F31)
340 st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
341 st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
344 st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
355 * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
356 * - b7 holds address to return to
357 * - must not touch r8-r11
371 ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
372 ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
383 ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
384 ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
420 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
441 mov pr=r28,-1
462 * We need to preserve the scratch registers f6-f11 in case the system
481 (p6) br.cond.sptk strace_error // syscall failed ->
493 // the syscall number may have changed, so re-load it and re-calculate the
494 // syscall entry-point:
498 mov r3=NR_syscalls - 1
500 adds r15=-1024,r15
503 shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
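In C terms, the re-dispatch arithmetic above looks like the sketch below. NR_SYSCALLS_MODEL and the function name are stand-ins; the 1024 bias and the 8-byte scaling are taken from the comments:

    typedef long (*syscall_fn_t)(void);

    #define SYSCALL_BASE      1024   /* ia64 syscall numbers start at 1024 */
    #define NR_SYSCALLS_MODEL 1280   /* stand-in for NR_syscalls */

    /* Sketch (assumption): recompute the handler after the traced syscall
     * number may have been rewritten.  shladd r20=r15,3,r16 is exactly
     * table + 8*(nr - 1024).
     */
    static syscall_fn_t syscall_entry(syscall_fn_t *table, unsigned long nr)
    {
            unsigned long idx = nr - SYSCALL_BASE;  /* adds r15=-1024,r15 */

            if (idx > NR_SYSCALLS_MODEL - 1)        /* bound check vs. r3 */
                    return 0;
            return table[idx];                      /* 8-byte table slots */
    }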
516 (p6) br.cond.sptk strace_error // syscall failed ->
523 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
534 (p6) mov r10=-1
558 /* call the kernel_thread payload; fn is in r4, arg in r5 */
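A hedged C model of the payload call; only the r4/r5 staging is from the source, the function name and the fall-through note are illustrative:

    /* Sketch (assumption): a kernel_thread() child resumes here with the
     * payload function in r4 and its argument in r5, both staged by
     * copy_thread(); the stub just makes the indirect call.
     */
    static void call_payload_model(int (*fn)(void *), void *arg)
    {
            fn(arg);        /* run the kernel-thread payload */
            /* if fn() returns, execution continues on the normal
             * return path (e.g., after a successful kernel exec) */
    }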
587 * Called by ia64_switch_to() after ia64_clone()->copy_thread(). r8 contains the
619 * the following known-to-be-safe values:
623 * r3: 1 (when returning to user-level)
624 * r8-r11: restored (syscall return value(s))
625 * r12: restored (user-level stack pointer)
626 * r13: restored (user-level thread pointer)
629 * r16-r17: cleared
630 * r18: user-level b6
632 * r20: user-level ar.fpsr
633 * r21: user-level b0
635 * r23: user-level ar.bspstore
636 * r24: user-level ar.rnat
637 * r25: user-level ar.unat
638 * r26: user-level ar.pfs
639 * r27: user-level ar.rsc
640 * r28: user-level ip
641 * r29: user-level psr
642 * r30: user-level cfm
643 * r31: user-level pr
644 * f6-f11: cleared
645 * pr: restored (user-level pr)
646 * b0: restored (user-level rp)
649 * ar.unat: restored (user-level ar.unat)
650 * ar.pfs: restored (user-level ar.pfs)
651 * ar.rsc: restored (user-level ar.rsc)
652 * ar.rnat: restored (user-level ar.rnat)
653 * ar.bspstore: restored (user-level ar.bspstore)
654 * ar.fpsr: restored (user-level ar.fpsr)
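The r8/r10 pair in this contract is the ia64 syscall return convention. Below is a hedged sketch, with hypothetical names, of how a user-level stub consumes it: r10 == -1 flags failure, in which case r8 carries the errno value.

    #include <errno.h>

    /* Sketch (assumption): decode the r8/r10 return convention.  Only
     * the convention itself comes from the source; the names here are
     * illustrative.
     */
    static long decode_syscall_return(long r8, long r10)
    {
            if (r10 == -1) {         /* kernel flagged an error */
                    errno = (int)r8; /* r8 carries the errno value */
                    return -1;
            }
            return r8;               /* success: r8 is the result */
    }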
663 * user- or fsys-mode, hence we disable interrupts early on.
665 * p6 controls whether current_thread_info()->flags needs to be checked for
666 * extra work. We always check for extra work when returning to user-level.
669 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
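Schematically, the p6-guarded test reduces to the C sketch below; the mask value and the slow-path name are stand-ins, only the flags load itself is from the source:

    struct thread_info_model { unsigned long flags; };

    #define TIF_WORK_MASK_MODEL 0xffUL  /* stand-in for _TIF_WORK_MASK */

    extern void do_extra_work(struct thread_info_model *ti);  /* hypothetical */

    /* Sketch (assumption): with interrupts already off, inspect the live
     * flags word and take the slow path if any work bit is set.
     */
    static void check_extra_work(struct thread_info_model *ti)
    {
            if (ti->flags & TIF_WORK_MASK_MODEL)  /* signals/resched/trace? */
                    do_extra_work(ti);
    }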
678 (pKStk) ld4 r21=[r20] // r21 <- preempt_count
679 (pUStk) mov r21=0 // r21 <- 0
681 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
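The gate computed into p6 here is worth spelling out; a one-line C rendering of the comment's "p6 <- pUStk || (preempt_count == 0)" (names are illustrative):

    /* Sketch (assumption): extra work may only be processed when
     * returning to user mode (pUStk) or when the kernel preempt count
     * has dropped to zero.
     */
    static int may_process_extra_work(int returning_to_user, int preempt_count)
    {
            return returning_to_user || preempt_count == 0;
    }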
685 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
694 (p6) ld4 r31=[r18] // load current_thread_info()->flags
695 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
703 (p6) ld4 r31=[r18] // load current_thread_info()->flags
704 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
709 ld8 r18=[r2],PT(R9)-PT(B6) // load b6
712 ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
717 ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
718 ld8 r11=[r3],PT(CR_IIP)-PT(R11)
742 ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
746 ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
747 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
750 ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
751 ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
754 ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
807 * user- or fsys-mode, hence we disable interrupts early on.
809 * p6 controls whether current_thread_info()->flags needs to be checked for
810 * extra work. We always check for extra work when returning to user-level.
813 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
822 (pKStk) ld4 r21=[r20] // r21 <- preempt_count
823 (pUStk) mov r21=0 // r21 <- 0
825 cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
829 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
834 (p6) ld4 r31=[r17] // load current_thread_info()->flags
838 lfetch [r21],PT(CR_IPSR)-PT(PR)
846 ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
862 ld8.fill r10=[r3],PT(R17)-PT(R10)
864 ld8.fill r11=[r2],PT(R18)-PT(R11)
892 ld8.fill r31=[r2],PT(F9)-PT(R31)
893 adds r3=PT(F10)-PT(F6),r3
895 ldf.fill f9=[r2],PT(F6)-PT(F9)
896 ldf.fill f10=[r3],PT(F8)-PT(F10)
898 ldf.fill f6=[r2],PT(F7)-PT(F6)
900 ldf.fill f7=[r2],PT(F11)-PT(F7)
903 srlz.d // ensure that interruption collection is off (VHPT is a don't-care, since text is pinned)
964 // mib : mov add br -> mib : ld8 add br
969 (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
978 (pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
996 sub r16=r16,r18 // krbs = old bsp - size of dirty partition
1000 add r18=64,r18 // don't force in0-in7 into memory...
1006 * To prevent leaking bits between the kernel and user-space,
1018 alloc loc0=ar.pfs,2,Nregs-2,2,0
1020 sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
1031 alloc loc0=ar.pfs,2,Nregs-2,2,0
1033 add out0=-Nregs*8,in0
1058 alloc loc0=ar.pfs,2,Nregs-2,2,0
1060 add out0=-Nregs*8,in0
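These alloc/out0 pairs belong to the loop that scrubs the invalid backing-store partition mentioned above; a C model of the net effect (the function name is illustrative):

    /* Sketch (assumption): the clearing loop's net effect -- zero every
     * 8-byte slot in the invalid partition of the register backing
     * store so no kernel bits leak back to user space.
     */
    static void clear_invalid_partition(unsigned long *slot, unsigned long *end)
    {
            while (slot < end)
                    *slot++ = 0;    /* scrub one stacked-register slot */
    }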
1089 (pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
1093 (pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
1097 (pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
1101 (pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
1112 mov pr=r31,-1 // I0
1117 * r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPTION)
1118 * r31 = current->thread_info->flags
1120 * p6 = TRUE if work-pending-check needs to be redone
1126 add r2=-8,r2
1127 add r3=-8,r3
1135 .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
1141 .ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
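The .ret9/.ret10 compares decide whether the flags check is redone after servicing work; the loop they imply, modeled in C with stand-in helpers:

    extern int work_bits_set(void);         /* hypothetical helpers */
    extern int need_resched_model(void);
    extern void schedule_model(void);
    extern void notify_resume_model(void);

    /* Sketch (assumption): after schedule() the flags are rechecked
     * (.ret9, p6 <- 1); after notify-resume handling the code returns
     * without rechecking (.ret10, p6 <- 0).
     */
    static void work_pending_model(void)
    {
            for (;;) {
                    if (!work_bits_set())
                            break;
                    if (need_resched_model()) {
                            schedule_model();       /* .ret9: recheck */
                    } else {
                            notify_resume_model();  /* .ret10: done */
                            break;
                    }
            }
    }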
1159 * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
1167 (p7) mov r10=-1
1173 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
1202 adds out1=8,sp // out1=&sigscratch->ar_pfs
1208 st8 [sp]=r9,-16 // allocate space for ar.unat and save it
1209 st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
1215 ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
1231 adds sp=-16,sp
1236 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
1237 * syscall-entry path does not save them, we save them here instead. Note: we
1238 * don't need to save any other registers that are not saved by the streamlined
1295 adds sp=-EXTRA_FRAME_SIZE,sp
1360 adds out0 = -MCOUNT_INSN_SIZE, out0
1391 adds out0 = -MCOUNT_INSN_SIZE, out0
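Both ftrace stubs rewind the instrumented address the same way; a hedged C rendering of the adjustment (the handler name and constant value are illustrative):

    #define MCOUNT_INSN_SIZE_MODEL 32  /* stand-in; the real value is per-arch */

    extern void ftrace_handler_model(unsigned long self, unsigned long parent);

    /* Sketch (assumption): the return address handed to the stub points
     * just past the mcount call, so subtracting the mcount instruction
     * size recovers the call-site address the tracer records.
     */
    static void mcount_adjust_model(unsigned long ret_ip, unsigned long parent_ip)
    {
            ftrace_handler_model(ret_ip - MCOUNT_INSN_SIZE_MODEL, parent_ip);
    }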