1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
4 * Chen Liqin <liqin.chen@sunplusct.com>
5 * Lennox Wu <lennox.wu@sunplusct.com>
6 * Copyright (C) 2012 Regents of the University of California
7 * Copyright (C) 2017 SiFive
8 */
9
10 #include <linux/cpu.h>
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/sched/debug.h>
14 #include <linux/sched/task_stack.h>
15 #include <linux/tick.h>
16 #include <linux/ptrace.h>
17 #include <linux/uaccess.h>
18 #include <linux/personality.h>
19
20 #include <asm/unistd.h>
21 #include <asm/processor.h>
22 #include <asm/csr.h>
23 #include <asm/stacktrace.h>
24 #include <asm/string.h>
25 #include <asm/switch_to.h>
26 #include <asm/thread_info.h>
27 #include <asm/cpuidle.h>
28 #include <asm/vector.h>
29 #include <asm/cpufeature.h>
30 #include <asm/exec.h>
31
32 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
33 #include <linux/stackprotector.h>
34 unsigned long __stack_chk_guard __read_mostly;
35 EXPORT_SYMBOL(__stack_chk_guard);
36 #endif
37
38 extern asmlinkage void ret_from_fork(void);
39
/*
 * Default arch idle: wait for the next interrupt via cpu_do_idle().
 * noinstr: called from the core idle loop's non-instrumentable region,
 * so no tracing/kprobes code may be generated here.
 */
void noinstr arch_cpu_idle(void)
{
	cpu_do_idle();
}
44
set_unalign_ctl(struct task_struct * tsk,unsigned int val)45 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
46 {
47 if (!unaligned_ctl_available())
48 return -EINVAL;
49
50 tsk->thread.align_ctl = val;
51 return 0;
52 }
53
get_unalign_ctl(struct task_struct * tsk,unsigned long adr)54 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
55 {
56 if (!unaligned_ctl_available())
57 return -EINVAL;
58
59 return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
60 }
61
/*
 * Dump the full GPR set plus status/badaddr/cause from @regs.
 * For a kernel-mode trap, epc and ra are additionally printed
 * symbolically (%pS) before the raw hex dump below — user-mode
 * addresses have no kernel symbols, so those two lines are skipped.
 */
void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	/* Raw values, three registers per line, in pt_regs order. */
	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
/*
 * Dump registers and, for kernel-mode traps only, a stack backtrace.
 * User-mode register state carries no kernel call chain worth walking.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}
103
arch_align_stack(unsigned long sp)104 unsigned long arch_align_stack(unsigned long sp)
105 {
106 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
107 sp -= get_random_u32_below(PAGE_SIZE);
108 return sp & ~0xf;
109 }
110
111 #ifdef CONFIG_COMPAT
112 static bool compat_mode_supported __read_mostly;
113
compat_elf_check_arch(Elf32_Ehdr * hdr)114 bool compat_elf_check_arch(Elf32_Ehdr *hdr)
115 {
116 return compat_mode_supported &&
117 hdr->e_machine == EM_RISCV &&
118 hdr->e_ident[EI_CLASS] == ELFCLASS32;
119 }
120
compat_mode_detect(void)121 static int __init compat_mode_detect(void)
122 {
123 unsigned long tmp = csr_read(CSR_STATUS);
124
125 csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
126 compat_mode_supported =
127 (csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
128
129 csr_write(CSR_STATUS, tmp);
130
131 pr_info("riscv: ELF compat mode %s",
132 compat_mode_supported ? "supported" : "unsupported");
133
134 return 0;
135 }
136 early_initcall(compat_mode_detect);
137 #endif
138
/*
 * Set up @regs so that returning to user space starts execution at
 * @pc with stack @sp (exec path).
 */
void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	/* Fresh status: previous-interrupt-enable set, everything else off. */
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	/* Select the user XLEN: 32-bit for compat tasks, else 64-bit. */
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}
163
/*
 * Reset the current task's arch state on exec(): drop FP and vector
 * context so the new program starts clean.
 *
 * Fix: the two memset() calls contained a mis-encoded "&current"
 * (garbled to "¤t"), which does not compile; restored the
 * address-of expressions.
 */
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 * frm: round to nearest, ties to even (IEEE default)
	 * fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state: disable V, free the old datap buffer, and
	 * clear the (now dangling) pointer along with the rest of vstate. */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
}
184
/*
 * Final arch teardown when a task_struct is freed: release the
 * per-thread vector context buffer, if vector is present.
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the vector context of datap. */
	if (has_vector())
		riscv_v_thread_free(tsk);
}
191
arch_dup_task_struct(struct task_struct * dst,struct task_struct * src)192 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
193 {
194 fstate_save(src, task_pt_regs(src));
195 *dst = *src;
196 /* clear entire V context, including datap for a new task */
197 memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
198 memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
199 clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
200
201 return 0;
202 }
203
/*
 * Arch part of fork()/clone()/kernel_thread(): set up the child @p's
 * saved context so that __switch_to() resumes it in ret_from_fork.
 * Returns 0 (no failure paths here).
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Clear the callee-saved register save area. */
	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		/* ret_from_fork picks the thread fn and its argument
		 * out of s[0]/s[1]. */
		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		/* User fork/clone: child starts with a copy of the
		 * parent's trap frame. */
		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		/* s[0] == 0 tells ret_from_fork this is a user child. */
		p->thread.s[0] = 0;
	}
	p->thread.riscv_v_flags = 0;
	if (has_vector())
		riscv_v_thread_alloc(p);
	/* First switch-in jumps to ret_from_fork on the child's kernel
	 * stack, which sits just below its pt_regs. */
	p->thread.ra = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
240
/*
 * Boot-time hook from fork_init(): create the slab cache used for
 * per-thread vector context allocations.
 */
void __init arch_task_cache_init(void)
{
	riscv_v_setup_ctx_cache();
}
245