1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
4 * Chen Liqin <liqin.chen@sunplusct.com>
5 * Lennox Wu <lennox.wu@sunplusct.com>
6 * Copyright (C) 2012 Regents of the University of California
7 * Copyright (C) 2017 SiFive
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/cpu.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/sched/debug.h>
15 #include <linux/sched/task_stack.h>
16 #include <linux/tick.h>
17 #include <linux/ptrace.h>
18 #include <linux/uaccess.h>
19 #include <linux/personality.h>
20 #include <linux/entry-common.h>
21
22 #include <asm/asm-prototypes.h>
23 #include <asm/unistd.h>
24 #include <asm/processor.h>
25 #include <asm/csr.h>
26 #include <asm/stacktrace.h>
27 #include <asm/string.h>
28 #include <asm/switch_to.h>
29 #include <asm/thread_info.h>
30 #include <asm/cpuidle.h>
31 #include <asm/vector.h>
32 #include <asm/cpufeature.h>
33 #include <asm/exec.h>
34 #include <asm/usercfi.h>
35
36 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
37 #include <linux/stackprotector.h>
38 unsigned long __stack_chk_guard __read_mostly;
39 EXPORT_SYMBOL(__stack_chk_guard);
40 #endif
41
42 extern asmlinkage void ret_from_fork_kernel_asm(void);
43 extern asmlinkage void ret_from_fork_user_asm(void);
44
/*
 * Architecture idle hook, called from the generic idle loop with IRQs
 * disabled. noinstr: runs in a section where instrumentation is forbidden.
 */
void noinstr arch_cpu_idle(void)
{
	cpu_do_idle();
}
49
set_unalign_ctl(struct task_struct * tsk,unsigned int val)50 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
51 {
52 if (!unaligned_ctl_available())
53 return -EINVAL;
54
55 tsk->thread.align_ctl = val;
56 return 0;
57 }
58
get_unalign_ctl(struct task_struct * tsk,unsigned long adr)59 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
60 {
61 if (!unaligned_ctl_available())
62 return -EINVAL;
63
64 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
65 }
66
/*
 * Dump the full register file in @regs to the kernel log.
 * For kernel-mode regs, epc/ra are additionally printed symbolically
 * (%pS) so oops output shows function names. The ssp column reads the
 * current task's active shadow stack pointer.
 */
void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT " ssp : " REG_FMT "\n",
		regs->t5, regs->t6, get_active_shstk(current));

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
show_regs(struct pt_regs * regs)102 void show_regs(struct pt_regs *regs)
103 {
104 __show_regs(regs);
105 if (!user_mode(regs))
106 dump_backtrace(regs, NULL, KERN_DEFAULT);
107 }
108
arch_align_stack(unsigned long sp)109 unsigned long arch_align_stack(unsigned long sp)
110 {
111 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
112 sp -= get_random_u32_below(PAGE_SIZE);
113 return sp & ~0xf;
114 }
115
116 #ifdef CONFIG_COMPAT
117 static bool compat_mode_supported __read_mostly;
118
/*
 * Accept a 32-bit ELF image only when the hart was probed to support
 * UXL=32 (see compat_mode_detect()) and the header is RISC-V/ELFCLASS32.
 */
bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}
125
compat_mode_detect(void)126 static int __init compat_mode_detect(void)
127 {
128 unsigned long tmp = csr_read(CSR_STATUS);
129
130 csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
131 compat_mode_supported =
132 (csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
133
134 csr_write(CSR_STATUS, tmp);
135
136 pr_info("riscv: ELF compat mode %s",
137 compat_mode_supported ? "supported" : "unsupported");
138
139 return 0;
140 }
141 early_initcall(compat_mode_detect);
142 #endif
143
/*
 * Set up the user register frame for a freshly exec()ed thread:
 * entry point @pc, initial user stack @sp, and a clean status word.
 */
void start_thread(struct pt_regs *regs, unsigned long pc,
		  unsigned long sp)
{
	/* User previous-privilege with interrupts enabled after sret. */
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

	/*
	 * clear shadow stack state on exec.
	 * libc will set it later via prctl.
	 */
	set_shstk_lock(current, false);
	set_shstk_status(current, false);
	set_shstk_base(current, 0, 0);
	set_active_shstk(current, 0);
	/*
	 * disable indirect branch tracking on exec.
	 * libc will enable it later via prctl.
	 */
	set_indir_lp_lock(current, false);
	set_indir_lp_status(current, false);

#ifdef CONFIG_64BIT
	/* Select user XLEN: UXL=32 for compat tasks, UXL=64 otherwise. */
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}
183
/*
 * Reset per-thread CPU state on exec so the new program image does not
 * inherit stale FPU, vector, or pointer-masking context.
 */
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	/* Fix mis-encoded "&current" (was garbled to "¤t"). */
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	/* Disable pointer masking for the new program image. */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
#endif
}
208
/*
 * Final per-arch teardown of a task_struct: release the vector context
 * buffer (datap) when the hart has a vector unit.
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	if (!has_vector() && !has_xtheadvector())
		return;

	/* Free the vector context of datap. */
	riscv_v_thread_free(tsk);
}
215
/*
 * Arch hook for fork: flush @src's live FP state into its thread struct
 * before the struct copy, then clear @dst's vector context so the child
 * does not alias the parent's datap buffer.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* Spill FP registers to memory so the copy below captures them. */
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	/* clear entire V context, including datap for a new task */
	memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);

	return 0;
}
227
/*
 * C continuation for a new kernel thread, entered from
 * ret_from_fork_kernel_asm: run the thread function, then (if it
 * returns, i.e. the thread is exec()ing into userspace) take the
 * normal syscall-exit path to user mode.
 */
asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs)
{
	fn(fn_arg);

	syscall_exit_to_user_mode(regs);
}
234
/*
 * C continuation for a forked user task, entered from
 * ret_from_fork_user_asm: finish the syscall-exit work and return to
 * user mode with the child's register frame.
 */
asmlinkage void ret_from_fork_user(struct pt_regs *regs)
{
	syscall_exit_to_user_mode(regs);
}
239
/*
 * Arch-specific part of fork/clone/kernel_thread: set up the child's
 * pt_regs and the saved context (p->thread) that __switch_to() will
 * restore the first time the child is scheduled.
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	unsigned long ssp = 0;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Ensure all threads in this mm have the same pointer masking mode. */
	if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
		set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);

	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		/* fn/fn_arg are stashed in callee-saved slots for the asm stub. */
		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
		p->thread.ra = (unsigned long)ret_from_fork_kernel_asm;
	} else {
		/*
		 * Allocate a new shadow stack if needed;
		 * shstk_alloc_thread_stack() decides based on @args (e.g. for
		 * CLONE_VM threads) and returns 0 when none is required.
		 */
		ssp = shstk_alloc_thread_stack(p, args);
		if (IS_ERR_VALUE(ssp))
			return PTR_ERR((void *)ssp);

		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		/* if needed, set new ssp */
		if (ssp)
			set_active_shstk(p, ssp);
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.ra = (unsigned long)ret_from_fork_user_asm;
	}
	p->thread.riscv_v_flags = 0;
	if (has_vector() || has_xtheadvector())
		riscv_v_thread_alloc(p);
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
289
/* Boot-time hook: create the slab cache used for vector contexts. */
void __init arch_task_cache_init(void)
{
	riscv_v_setup_ctx_cache();
}
294
#ifdef CONFIG_RISCV_ISA_SUPM
/* PMLEN values (number of masked pointer bits) used by the prctl ABI. */
enum {
	PMLEN_0 = 0,
	PMLEN_7 = 7,
	PMLEN_16 = 16,
};

/* Which PMLEN values the hardware accepted when probed at boot. */
static bool have_user_pmlen_7;
static bool have_user_pmlen_16;

/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;
309
/*
 * PR_SET_TAGGED_ADDR_CTRL backend: select a pointer-masking PMLEN for
 * @task and optionally enable the tagged address ABI for its mm.
 * Returns 0 on success, -EINVAL on unsupported/invalid requests,
 * -EBUSY if the mm's PMLEN is locked to a different value, or -EINTR
 * if taking mmap_lock was interrupted.
 */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);
	struct mm_struct *mm = task->mm;
	unsigned long pmm;
	u8 pmlen;

	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return -EINVAL;

	/* Pointer masking is a 64-bit-only feature. */
	if (is_compat_thread(ti))
		return -EINVAL;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Prefer the smallest PMLEN that satisfies the user's request,
	 * in case choosing a larger PMLEN has a performance impact.
	 */
	pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
	if (pmlen == PMLEN_0) {
		pmm = ENVCFG_PMM_PMLEN_0;
	} else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
		pmlen = PMLEN_7;
		pmm = ENVCFG_PMM_PMLEN_7;
	} else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
		pmlen = PMLEN_16;
		pmm = ENVCFG_PMM_PMLEN_16;
	} else {
		return -EINVAL;
	}

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled, or if pointer
	 * masking is disabled for userspace (pmlen == 0).
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
		return -EINVAL;

	/* Without the tagged-addr ABI, keep the kernel-side PMLEN at 0. */
	if (!(arg & PR_TAGGED_ADDR_ENABLE)) {
		pmlen = PMLEN_0;
		pmm = ENVCFG_PMM_PMLEN_0;
	}

	/* mmap_lock serializes PMLEN changes against the mm-wide lock bit. */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
		mmap_write_unlock(mm);
		return -EBUSY;
	}

	envcfg_update_bits(task, ENVCFG_PMM, pmm);
	mm->context.pmlen = pmlen;

	mmap_write_unlock(mm);

	return 0;
}
372
/*
 * PR_GET_TAGGED_ADDR_CTRL backend: report @task's effective PMLEN
 * (from its saved envcfg.PMM) and whether the tagged address ABI is
 * enabled for its mm. Returns -EINVAL when Supm is absent or the
 * thread is a compat task.
 */
long get_tagged_addr_ctrl(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);
	long ret = 0;

	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return -EINVAL;

	if (is_compat_thread(ti))
		return -EINVAL;

	/*
	 * The mm context's pmlen is set only when the tagged address ABI is
	 * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
	 */
	switch (task->thread.envcfg & ENVCFG_PMM) {
	case ENVCFG_PMM_PMLEN_7:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
		break;
	case ENVCFG_PMM_PMLEN_16:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
		break;
	}

	/* Non-zero mm pmlen means the tagged address ABI was opted into. */
	if (task->mm->context.pmlen)
		ret |= PR_TAGGED_ADDR_ENABLE;

	return ret;
}
402
/*
 * Probe one PMM encoding: set @value in CSR_ENVCFG and read it back
 * (clearing the field again as a side effect). envcfg.PMM is WARL, so
 * an unsupported value will not stick and this returns false.
 */
static bool try_to_set_pmm(unsigned long value)
{
	csr_set(CSR_ENVCFG, value);
	return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}
408
409 /*
410 * Global sysctl to disable the tagged user addresses support. This control
411 * only prevents the tagged address ABI enabling via prctl() and does not
412 * disable it for tasks that already opted in to the relaxed ABI.
413 */
414
415 static const struct ctl_table tagged_addr_sysctl_table[] = {
416 {
417 .procname = "tagged_addr_disabled",
418 .mode = 0644,
419 .data = &tagged_addr_disabled,
420 .maxlen = sizeof(int),
421 .proc_handler = proc_dointvec_minmax,
422 .extra1 = SYSCTL_ZERO,
423 .extra2 = SYSCTL_ONE,
424 },
425 };
426
/*
 * Boot-time setup for pointer masking: probe which PMLEN values the
 * hardware accepts and register the abi.tagged_addr_disabled sysctl.
 */
static int __init tagged_addr_init(void)
{
	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return 0;

	/*
	 * envcfg.PMM is a WARL field. Detect which values are supported.
	 * Assume the supported PMLEN values are the same on all harts.
	 */
	csr_clear(CSR_ENVCFG, ENVCFG_PMM);
	have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
	have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);

	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;

	return 0;
}
core_initcall(tagged_addr_init);
#endif /* CONFIG_RISCV_ISA_SUPM */
447