Lines Matching +full:ia32 +full:- +full:3 +full:a
1 // SPDX-License-Identifier: GPL-2.0-only
8 * X86-64 port
11 * CPU hotplug support - ashok.raj@intel.com
15 * This file handles the architecture-dependent parts of process handling.
51 #include <asm/ia32.h>
77 if (regs->orig_ax != -1) in __show_regs()
78 pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); in __show_regs()
83 log_lvl, regs->ax, regs->bx, regs->cx); in __show_regs()
85 log_lvl, regs->dx, regs->si, regs->di); in __show_regs()
87 log_lvl, regs->bp, regs->r8, regs->r9); in __show_regs()
89 log_lvl, regs->r10, regs->r11, regs->r12); in __show_regs()
91 log_lvl, regs->r13, regs->r14, regs->r15); in __show_regs()
121 log_lvl, regs->cs, ds, es, cr0); in __show_regs()
128 get_debugreg(d3, 3); in __show_regs()
132 /* Only print out debug registers if they are in their non-default state. */ in __show_regs()
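The guard that this comment describes is elided by the match; the idea is to skip the dump while every debug register still holds its architectural reset value. A stand-alone sketch of that predicate (DR6_RESET and DR7_RESET are assumed names here for the reset patterns 0xffff0ff0 and 0x400):

#include <stdbool.h>
#include <stdint.h>

#define DR6_RESET 0xffff0ff0u	/* assumed: architectural DR6 reset pattern */
#define DR7_RESET 0x00000400u	/* assumed: architectural DR7 reset value */

/* True while the debug registers are still in their power-on state,
 * i.e. there is nothing interesting to print. */
static bool debugregs_default(uint64_t d0, uint64_t d1, uint64_t d2,
			      uint64_t d3, uint64_t d6, uint64_t d7)
{
	return (d0 | d1 | d2 | d3) == 0 &&
	       (uint32_t)d6 == DR6_RESET && (uint32_t)d7 == DR7_RESET;
}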
147 WARN_ON(dead_task->mm); in release_thread()
157 * traced or probed then any access to a per CPU variable happens with
184 * traced or probed then any access to a per CPU variable happens with
207 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
218 * be the pre-existing saved base or it could be zero. On AMD in save_base_legacy()
223 * context switch between 64-bit programs), and avoiding in save_base_legacy()
224 * the RDMSR helps a lot, so we just assume that whatever in save_base_legacy()
228 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we in save_base_legacy()
234 * If the selector is 1, 2, or 3, then the base is zero on in save_base_legacy()
240 * If selector > 3, then it refers to a real segment, and in save_base_legacy()
244 prev_p->thread.fsbase = 0; in save_base_legacy()
246 prev_p->thread.gsbase = 0; in save_base_legacy()
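Pieced together, these fragments describe a two-way decision on the saved selector. A simplified stand-alone model of that decision (the struct and enum below are illustrative stand-ins for the kernel's thread_struct and selector enum):

/* Model of save_base_legacy(): a selector of 0 means we trust whatever
 * base was saved earlier, avoiding a hot-path RDMSR; any non-zero
 * selector means the base is either architecturally zero (1, 2, 3) or
 * recoverable from the descriptor table (> 3), so the cache is reset. */
struct thread_model { unsigned long fsbase, gsbase; };
enum which_sel { FS_SEL, GS_SEL };

static void save_base_legacy_model(struct thread_model *t,
				   unsigned short selector,
				   enum which_sel which)
{
	if (selector == 0)
		return;			/* keep the previously saved base */

	if (which == FS_SEL)
		t->fsbase = 0;
	else
		t->gsbase = 0;
}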
252 savesegment(fs, task->thread.fsindex); in save_fsgs()
253 savesegment(gs, task->thread.gsindex); in save_fsgs()
260 task->thread.fsbase = rdfsbase(); in save_fsgs()
261 task->thread.gsbase = __rdgsbase_inactive(); in save_fsgs()
263 save_base_legacy(task, task->thread.fsindex, FS); in save_fsgs()
264 save_base_legacy(task, task->thread.gsindex, GS); in save_fsgs()
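On FSGSBASE-capable CPUs the bases are read directly with RDFSBASE/RDGSBASE, and since Linux 5.9 the same instructions are enabled for user space. A minimal user-space probe of the instruction the fast path above uses (build with gcc -mfsgsbase; raises SIGILL on CPUs or kernels without FSGSBASE enabled):

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	/* RDFSBASE, the user-space twin of the kernel's rdfsbase() above.
	 * On glibc the FS base points at the thread control block. */
	unsigned long long fsbase = _readfsbase_u64();

	printf("FS base (TLS pointer): %#llx\n", fsbase);
	return 0;
}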
269 * While a process is running, current->thread.fsbase and current->thread.gsbase
300 if (likely(next_index <= 3)) { in load_seg_legacy()
302 * The next task is using 64-bit TLS, is not using this in load_seg_legacy()
324 * Intel-style CPUs.) in load_seg_legacy()
337 * The next task is using a real segment. Loading the selector in load_seg_legacy()
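Assembled from these fragments, the legacy load path branches on whether the incoming selector names a real descriptor. A simplified model (write_base_msr() and load_selector() are placeholder stubs for the kernel's WRMSR path and loadseg()):

/* Stubs standing in for the kernel's base-MSR write and loadseg(). */
static void write_base_msr(unsigned long base) { (void)base; }
static void load_selector(unsigned short sel)  { (void)sel; }

/* Model of load_seg_legacy(): selectors 0-3 mean the task manages the
 * base directly, so it is written explicitly; a selector > 3 names a
 * real GDT/LDT segment and loading it sets the base as a side effect. */
static void load_seg_legacy_model(unsigned short next_index,
				  unsigned long next_base)
{
	if (next_index <= 3)
		write_base_msr(next_base);
	else
		load_selector(next_index);
}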
346 * is not XSTATE managed on context switch because that would require a
357 prev->pkru = rdpkru(); in x86_pkru_load()
363 if (prev->pkru != next->pkru) in x86_pkru_load()
364 wrpkru(next->pkru); in x86_pkru_load()
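PKRU is the register behind memory protection keys, which is why it is switched eagerly here rather than lazily with the rest of XSTATE. A small user-space demo of the state that this rdpkru()/wrpkru() pair preserves per thread (needs a pkeys-capable CPU; error handling mostly trimmed):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Allocate a protection key that denies all access through it.
	 * The per-thread rights live in PKRU -- the register the context
	 * switch above saves and restores. */
	int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
	if (pkey < 0) { perror("pkey_alloc"); return 1; }

	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);

	printf("page %p is guarded by pkey %d; touching it now faults\n",
	       page, pkey);
	return 0;
}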
372 if (unlikely(prev->fsindex || next->fsindex)) in x86_fsgsbase_load()
373 loadseg(FS, next->fsindex); in x86_fsgsbase_load()
374 if (unlikely(prev->gsindex || next->gsindex)) in x86_fsgsbase_load()
375 loadseg(GS, next->gsindex); in x86_fsgsbase_load()
378 wrfsbase(next->fsbase); in x86_fsgsbase_load()
379 __wrgsbase_inactive(next->gsbase); in x86_fsgsbase_load()
381 load_seg_legacy(prev->fsindex, prev->fsbase, in x86_fsgsbase_load()
382 next->fsindex, next->fsbase, FS); in x86_fsgsbase_load()
383 load_seg_legacy(prev->gsindex, prev->gsbase, in x86_fsgsbase_load()
384 next->gsindex, next->gsbase, GS); in x86_fsgsbase_load()
391 unsigned short idx = selector >> 3; in x86_fsgsbase_read_task()
405 idx -= GDT_ENTRY_TLS_MIN; in x86_fsgsbase_read_task()
406 base = get_desc_base(&task->thread.tls_array[idx]); in x86_fsgsbase_read_task()
413 * with RCU. This is a slow path, though, so we can just in x86_fsgsbase_read_task()
416 mutex_lock(&task->mm->context.lock); in x86_fsgsbase_read_task()
417 ldt = task->mm->context.ldt; in x86_fsgsbase_read_task()
418 if (unlikely(!ldt || idx >= ldt->nr_entries)) in x86_fsgsbase_read_task()
421 base = get_desc_base(ldt->entries + idx); in x86_fsgsbase_read_task()
422 mutex_unlock(&task->mm->context.lock); in x86_fsgsbase_read_task()
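The selector >> 3 on line 391 strips the RPL and table-indicator bits to recover the descriptor index that the GDT/LDT lookups above use. A stand-alone decoder makes the selector layout explicit (2-bit RPL, 1-bit TI, 13-bit index):

#include <stdbool.h>
#include <stdio.h>

static void decode_selector(unsigned short sel)
{
	unsigned rpl = sel & 3;		/* bits 1:0, requested privilege */
	bool ldt = sel & 4;		/* bit 2: 0 = GDT, 1 = LDT */
	unsigned idx = sel >> 3;	/* bits 15:3, descriptor index */

	printf("selector %#06x: index %u in %s, RPL %u\n",
	       sel, idx, ldt ? "LDT" : "GDT", rpl);
}

int main(void)
{
	decode_selector(0x33);	/* Linux x86-64 user code: GDT entry 6, RPL 3 */
	decode_selector(0x07);	/* LDT entry 0, RPL 3 */
	return 0;
}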
468 (task->thread.fsindex == 0)) in x86_fsbase_read_task()
469 fsbase = task->thread.fsbase; in x86_fsbase_read_task()
471 fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); in x86_fsbase_read_task()
483 (task->thread.gsindex == 0)) in x86_gsbase_read_task()
484 gsbase = task->thread.gsbase; in x86_gsbase_read_task()
486 gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); in x86_gsbase_read_task()
495 task->thread.fsbase = fsbase; in x86_fsbase_write_task()
502 task->thread.gsbase = gsbase; in x86_gsbase_write_task()
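These four helpers back, among other things, the ptrace register view of FS/GS. A user-space sketch of reading a stopped tracee's FS base through PTRACE_PEEKUSER (attachment and error handling are omitted; the tracee is assumed to be stopped):

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Read the tracee's FS base; inside the kernel this request is served
 * by x86_fsbase_read_task() above. */
static long read_fsbase(pid_t pid)
{
	return ptrace(PTRACE_PEEKUSER, pid,
		      offsetof(struct user_regs_struct, fs_base), NULL);
}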
525 regs->ip = new_ip; in start_thread_common()
526 regs->sp = new_sp; in start_thread_common()
527 regs->cs = _cs; in start_thread_common()
528 regs->ss = _ss; in start_thread_common()
529 regs->flags = X86_EFLAGS_IF; in start_thread_common()
553 * - fold all the options into a flag word and test it with a single test.
554 * - could test fs/gs bitsliced
563 struct thread_struct *prev = &prev_p->thread; in __switch_to()
564 struct thread_struct *next = &next_p->thread; in __switch_to()
565 struct fpu *prev_fpu = &prev->fpu; in __switch_to()
608 savesegment(es, prev->es); in __switch_to()
609 if (unlikely(next->es | prev->es)) in __switch_to()
610 loadsegment(es, next->es); in __switch_to()
612 savesegment(ds, prev->ds); in __switch_to()
613 if (unlikely(next->ds | prev->ds)) in __switch_to()
614 loadsegment(ds, next->ds); in __switch_to()
635 * AMD CPUs have a misfeature: SYSRET sets the SS selector but in __switch_to()
636 * does not update the cached descriptor. As a result, if we in __switch_to()
644 * selectors at every context switch. SYSCALL sets up a valid in __switch_to()
645 * SS, so the only way to get NULL is to re-enter the kernel in __switch_to()
646 * from CPL 3 through an interrupt. Since that can't happen in __switch_to()
647 * in the same task as a running syscall, we are guaranteed to in __switch_to()
648 * context switch between every interrupt vector entry and a in __switch_to()
653 * it previously had a different non-NULL value. in __switch_to()
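The fixup this comment motivates is short. A hedged reconstruction of its shape (the gating flag and selector names follow the usual kernel spelling, but treat this as a sketch rather than the verbatim source):

	/* If a SYSRET on an affected AMD CPU left the cached SS descriptor
	 * stale, reloading the kernel SS selector refreshes it. Gated on
	 * the erratum flag so unaffected CPUs pay nothing. */
	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		unsigned short ss_sel;

		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}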
673 /* Pretend that this comes from a 64-bit execve */ in set_personality_64bit()
674 task_pt_regs(current)->orig_ax = __NR_execve; in set_personality_64bit()
675 current_thread_info()->status &= ~TS_COMPAT; in set_personality_64bit()
676 if (current->mm) in set_personality_64bit()
677 __set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags); in set_personality_64bit()
683 current->personality &= ~READ_IMPLIES_EXEC; in set_personality_64bit()
689 if (current->mm) in __set_personality_x32()
690 current->mm->context.flags = 0; in __set_personality_x32()
692 current->personality &= ~READ_IMPLIES_EXEC; in __set_personality_x32()
699 * Pretend to come from an x32 execve. in __set_personality_x32()
701 task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; in __set_personality_x32()
702 current_thread_info()->status &= ~TS_COMPAT; in __set_personality_x32()
709 if (current->mm) { in __set_personality_ia32()
714 __set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags); in __set_personality_ia32()
717 current->personality |= force_personality32; in __set_personality_ia32()
719 task_pt_regs(current)->orig_ax = __NR_ia32_execve; in __set_personality_ia32()
720 current_thread_info()->status |= TS_COMPAT; in __set_personality_ia32()
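The three orig_ax values differ because each ABI numbers execve differently. A tiny illustration of the conventions (numbers taken from the stock uapi headers; x32 numbers carry __X32_SYSCALL_BIT):

#include <stdio.h>

#define X32_SYSCALL_BIT 0x40000000UL	/* __X32_SYSCALL_BIT in asm/unistd.h */

int main(void)
{
	unsigned long nr64  = 59;			/* __NR_execve, 64-bit ABI */
	unsigned long nr32  = 11;			/* __NR_ia32_execve (i386) */
	unsigned long nrx32 = 520 | X32_SYSCALL_BIT;	/* __NR_x32_execve */

	printf("execve: 64-bit %lu, ia32 %lu, x32 %#lx\n", nr64, nr32, nrx32);
	return 0;
}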
745 return (long)image->size; in prctl_map_vdso()
756 return -ENODEV; in prctl_enable_tagged_addr()
759 if (current->mm != mm) in prctl_enable_tagged_addr()
760 return -EINVAL; in prctl_enable_tagged_addr()
763 !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) in prctl_enable_tagged_addr()
764 return -EINVAL; in prctl_enable_tagged_addr()
767 return -EINTR; in prctl_enable_tagged_addr()
769 if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { in prctl_enable_tagged_addr()
771 return -EBUSY; in prctl_enable_tagged_addr()
776 return -EINVAL; in prctl_enable_tagged_addr()
778 mm->context.lam_cr3_mask = X86_CR3_LAM_U57; in prctl_enable_tagged_addr()
779 mm->context.untag_mask = ~GENMASK(62, 57); in prctl_enable_tagged_addr()
782 return -EINVAL; in prctl_enable_tagged_addr()
785 write_cr3(__read_cr3() | mm->context.lam_cr3_mask); in prctl_enable_tagged_addr()
787 set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); in prctl_enable_tagged_addr()
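From user space this path is reached through arch_prctl(). A minimal demo of enabling LAM_U57 and dereferencing a tagged pointer (ARCH_ENABLE_TAGGED_ADDR is 0x4002 in asm/prctl.h on LAM-capable kernels, 6.4 or later; elsewhere the call fails):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_ENABLE_TAGGED_ADDR
#define ARCH_ENABLE_TAGGED_ADDR 0x4002	/* from asm/prctl.h */
#endif

int main(void)
{
	/* Ask for 6 tag bits: the kernel selects LAM_U57, i.e. tag bits
	 * 62:57, matching the ~GENMASK(62, 57) untag mask set above. */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6UL)) {
		perror("arch_prctl(ARCH_ENABLE_TAGGED_ADDR)");
		return 1;
	}

	unsigned long *p = malloc(sizeof(*p));
	/* Plant an arbitrary 6-bit tag; hardware ignores it on access. */
	unsigned long *tagged = (void *)((unsigned long)p | (0x2aUL << 57));

	*tagged = 42;
	printf("wrote through %p, read back %lu\n", (void *)tagged, *p);
	free(p);
	return 0;
}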
802 return -EPERM; in do_arch_prctl_64()
816 * On non-FSGSBASE systems, save_base_legacy() expects in do_arch_prctl_64()
819 task->thread.gsbase = arg2; in do_arch_prctl_64()
822 task->thread.gsindex = 0; in do_arch_prctl_64()
834 return -EPERM; in do_arch_prctl_64()
846 * On non-FSGSBASE systems, save_base_legacy() expects in do_arch_prctl_64()
849 task->thread.fsbase = arg2; in do_arch_prctl_64()
851 task->thread.fsindex = 0; in do_arch_prctl_64()
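These two cases are the kernel end of the classic ARCH_SET_GS/ARCH_SET_FS interface. A small user-space sketch of setting and reading back the GS base, which 64-bit user code does not otherwise use (ARCH_SET_GS is 0x1001 and ARCH_GET_GS is 0x1004 in asm/prctl.h):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_SET_GS
#define ARCH_SET_GS 0x1001	/* from asm/prctl.h */
#define ARCH_GET_GS 0x1004
#endif

int main(void)
{
	unsigned long base = 0;

	/* ARCH_SET_GS lands in the do_arch_prctl_64() case above: the
	 * value is cached in thread.gsbase and gsindex is reset to 0. */
	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000000UL);
	syscall(SYS_arch_prctl, ARCH_GET_GS, &base);

	printf("GS base: %#lx\n", base);
	return 0;
}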
884 return put_user(task->mm->context.untag_mask, in do_arch_prctl_64()
887 return prctl_enable_tagged_addr(task->mm, arg2); in do_arch_prctl_64()
890 return -EINVAL; in do_arch_prctl_64()
891 set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); in do_arch_prctl_64()
906 ret = -EINVAL; in do_arch_prctl_64()
918 if (ret == -EINVAL) in SYSCALL_DEFINE2()
933 return task_pt_regs(task)->sp; in KSTK_ESP()