Lines Matching +full:protect +full:- +full:exec
1 // SPDX-License-Identifier: GPL-2.0-only
67 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
87 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
92 * - the task gets scheduled in; if both the task's fpsimd_cpu field
93 * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
97 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
103 * - the task executes an ordinary syscall; upon return to userland, the
107 * - the task executes a syscall which executes some NEON instructions; this is
109 * register contents to memory, clears the fpsimd_last_state per-cpu variable
112 * - the task gets preempted after kernel_neon_end() is called; as we have not
125 static int __sve_default_vl = -1;
178 * The double-underscore version must only be called if you know the task
218 kfree(task->thread.sve_state); in __sve_free()
219 task->thread.sve_state = NULL; in __sve_free()
244 * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
245 * corresponding Zn), P0-P15 and FFR are encoded in
246 * task->thread.sve_state, formatted appropriately for vector
247 * length task->thread.sve_vl.
249 * task->thread.sve_state must point to a valid buffer at least
261 * When stored, FPSIMD registers V0-V31 are encoded in
262 * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
263 * logically zero but not stored anywhere; P0-P15 and FFR are not
268 * task->thread.sve_state does not need to be non-NULL, valid or any
271 * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
289 sve_load_state(sve_pffr(&current->thread), in task_fpsimd_load()
290 &current->thread.uw.fpsimd_state.fpsr, in task_fpsimd_load()
291 sve_vq_from_vl(current->thread.sve_vl) - 1); in task_fpsimd_load()
293 fpsimd_load_state(&current->thread.uw.fpsimd_state); in task_fpsimd_load()
311 if (WARN_ON(sve_get_vl() != last->sve_vl)) { in fpsimd_save()
314 * re-enter user with corrupt state. in fpsimd_save()
321 sve_save_state((char *)last->sve_state + in fpsimd_save()
322 sve_ffr_offset(last->sve_vl), in fpsimd_save()
323 &last->st->fpsr); in fpsimd_save()
325 fpsimd_save_state(last->st); in fpsimd_save()
331 * We're on a slow path, so some sanity-checks are included.
370 /* Writing -1 has the special meaning "set to max": */ in sve_proc_do_default_vl()
371 if (vl == -1) in sve_proc_do_default_vl()
375 return -EINVAL; in sve_proc_do_default_vl()
394 return -EINVAL; in sve_sysctl_init()
404 (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
431 *p = arm64_cpu_to_le128(fst->vregs[i]); in __fpsimd_to_sve()
436 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
437 * task->thread.sve_state.
439 * Task can be a non-runnable task, or current. In the latter case,
442 * task->thread.sve_state must point to at least sve_state_size(task)
444 * task->thread.uw.fpsimd_state must be up to date before calling this
450 void *sst = task->thread.sve_state; in fpsimd_to_sve()
451 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; in fpsimd_to_sve()
456 vq = sve_vq_from_vl(task->thread.sve_vl); in fpsimd_to_sve()
461 * Transfer the SVE state in task->thread.sve_state to
462 * task->thread.uw.fpsimd_state.
464 * Task can be a non-runnable task, or current. In the latter case,
467 * task->thread.sve_state must point to at least sve_state_size(task)
469 * task->thread.sve_state must be up to date before calling this function.
474 void const *sst = task->thread.sve_state; in sve_to_fpsimd()
475 struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; in sve_to_fpsimd()
482 vq = sve_vq_from_vl(task->thread.sve_vl); in sve_to_fpsimd()
485 fst->vregs[i] = arm64_le128_to_cpu(*p); in sve_to_fpsimd()
497 return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl)); in sve_state_size()
501 * Ensure that task->thread.sve_state is allocated and sufficiently large.
504 * task->thread.sve_state with new data. The memory is always zeroed
512 if (task->thread.sve_state) { in sve_alloc()
513 memset(task->thread.sve_state, 0, sve_state_size(current)); in sve_alloc()
518 task->thread.sve_state = in sve_alloc()
525 BUG_ON(!task->thread.sve_state); in sve_alloc()
530 * Ensure that task->thread.sve_state is up to date with respect to
533 * This should only be called by ptrace. task must be non-runnable.
534 * task->thread.sve_state must point to at least sve_state_size(task)
544 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
547 * This should only be called by ptrace. task must be non-runnable.
548 * task->thread.sve_state must point to at least sve_state_size(task)
558 * Ensure that task->thread.sve_state is up to date with respect to
559 * the task->thread.uw.fpsimd_state.
563 * task must be non-runnable.
564 * task->thread.sve_state must point to at least sve_state_size(task)
566 * task->thread.uw.fpsimd_state must already have been initialised with
572 void *sst = task->thread.sve_state; in sve_sync_from_fpsimd_zeropad()
573 struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; in sve_sync_from_fpsimd_zeropad()
578 vq = sve_vq_from_vl(task->thread.sve_vl); in sve_sync_from_fpsimd_zeropad()
589 return -EINVAL; in sve_set_vector_length()
592 return -EINVAL; in sve_set_vector_length()
595 * Clamp to the maximum vector length that VL-agnostic SVE code can in sve_set_vector_length()
606 task->thread.sve_vl_onexec = vl; in sve_set_vector_length()
608 /* Reset VL to system default on next exec: */ in sve_set_vector_length()
609 task->thread.sve_vl_onexec = 0; in sve_set_vector_length()
615 if (vl == task->thread.sve_vl) in sve_set_vector_length()
621 * non-SVE thread. in sve_set_vector_length()
642 task->thread.sve_vl = vl; in sve_set_vector_length()
662 ret = current->thread.sve_vl_onexec; in sve_prctl_status()
664 ret = current->thread.sve_vl; in sve_prctl_status()
682 return -EINVAL; in sve_set_current_vl()
695 return -EINVAL; in sve_get_current_vl()
710 for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) { in sve_probe_vqs()
711 write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ in sve_probe_vqs()
731 * This function is called during the bring-up of early secondary CPUs only.
744 * This function is called during the bring-up of late secondary CPUs only.
757 return -EINVAL; in sve_verify_vq_map()
786 return -EINVAL; in sve_verify_vq_map()
827 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
848 zcr |= vq_max - 1; /* set LEN field to maximum effective value */ in read_zcr_features()
863 * The SVE architecture mandates support for 128-bit vectors, in sve_setup()
874 * Sanity-check that the max VL we determined through CPU features in sve_setup()
891 /* No non-virtualisable VLs found */ in sve_setup()
893 else if (WARN_ON(b == SVE_VQ_MAX - 1)) in sve_setup()
941 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); in do_sve_acc()
1013 &next->thread.uw.fpsimd_state; in fpsimd_thread_switch()
1014 wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id(); in fpsimd_thread_switch()
1032 memset(&current->thread.uw.fpsimd_state, 0, in fpsimd_flush_thread()
1033 sizeof(current->thread.uw.fpsimd_state)); in fpsimd_flush_thread()
1043 * task without an exec and hence a call to this function. in fpsimd_flush_thread()
1050 vl = current->thread.sve_vl_onexec ? in fpsimd_flush_thread()
1051 current->thread.sve_vl_onexec : get_sve_default_vl(); in fpsimd_flush_thread()
1060 current->thread.sve_vl = vl; in fpsimd_flush_thread()
1064 * length will be reset by a subsequent exec: in fpsimd_flush_thread()
1067 current->thread.sve_vl_onexec = 0; in fpsimd_flush_thread()
1089 * current->thread.uw.fpsimd_state is updated so that it can be copied to
1110 last->st = &current->thread.uw.fpsimd_state; in fpsimd_bind_task_to_cpu()
1111 last->sve_state = current->thread.sve_state; in fpsimd_bind_task_to_cpu()
1112 last->sve_vl = current->thread.sve_vl; in fpsimd_bind_task_to_cpu()
1113 current->thread.fpsimd_cpu = smp_processor_id(); in fpsimd_bind_task_to_cpu()
1135 last->st = st; in fpsimd_bind_state_to_cpu()
1136 last->sve_state = sve_state; in fpsimd_bind_state_to_cpu()
1137 last->sve_vl = sve_vl; in fpsimd_bind_state_to_cpu()
1183 current->thread.uw.fpsimd_state = *state; in fpsimd_update_current_state()
1201 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
1208 t->thread.fpsimd_cpu = NR_CPUS; in fpsimd_flush_task_state()
1252 * Kernel-side NEON support functions
1319 * Do not use them for any other purpose -- if tempted to do so, you are
1346 &this_cpu_ptr(&efi_fpsimd_state)->fpsr); in __efi_fpsimd_begin()
1371 &this_cpu_ptr(&efi_fpsimd_state)->fpsr, in __efi_fpsimd_end()
1372 sve_vq_from_vl(sve_get_vl()) - 1); in __efi_fpsimd_end()
1441 pr_notice("Floating-point is not implemented\n"); in fpsimd_init()