/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PROCESSOR_H
#define _ASM_RISCV_PROCESSOR_H

#include <linux/const.h>
#include <linux/cache.h>
#include <linux/prctl.h>

#include <vdso/processor.h>

#include <asm/ptrace.h>
#include <asm/insn-def.h>
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>
#include <asm/usercfi.h>

#define arch_get_mmap_end(addr, len, flags)			\
({								\
	STACK_TOP_MAX;						\
})

#define arch_get_mmap_base(addr, base)				\
({								\
	base;							\
})

#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
#define STACK_TOP_MAX		TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW	TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE
#endif
#define STACK_ALIGN		16

#define STACK_TOP		DEFAULT_MAP_WINDOW

#ifdef CONFIG_MMU
#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0)
#else
#define user_max_virt_addr() 0
#endif /* CONFIG_MMU */

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#ifdef CONFIG_64BIT
#define TASK_UNMAPPED_BASE	PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
#else
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
#endif
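
/*
 * Worked example (illustrative, assuming Sv39, i.e. MMAP_MIN_VA_BITS == 39):
 * (UL(1) << 39) / 3 == 0x2aaaaaaaaa, which PAGE_ALIGN rounds up to
 * 0x2aaaaab000 (~170 GiB), placing the start of the mmap search well
 * above a typically-sized executable and heap.
 */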

#ifndef __ASSEMBLER__

struct task_struct;
struct pt_regs;

/*
 * We use a flag to track in-kernel Vector context. Currently the flag has the
 * following meaning:
 *
 *  - bit 0: indicates whether the in-kernel Vector context is active. The
 *    activation of this state disables preemption. On a non-RT kernel, it
 *    also disables bh.
 *  - bit 8: tracks preemptible kernel-mode Vector, when
 *    RISCV_ISA_V_PREEMPTIVE is enabled. Calling kernel_vector_begin() does
 *    not disable preemption if the thread's kernel_vstate.datap is
 *    allocated. Instead, the kernel sets this bit. The trap entry/exit code
 *    then knows if we are entering/exiting the context that owns preempt_v.
 *     - 0: the task is not using preempt_v.
 *     - 1: the task is actively using preempt_v. Whether the task owns
 *          the preempt_v context is decided by the bits in
 *          RISCV_V_CTX_DEPTH_MASK.
 *  - bits 16-23: RISCV_V_CTX_DEPTH_MASK, used by the context tracking
 *    routine when preempt_v starts:
 *     - 0: the task is actively using, and owns, the preempt_v context.
 *     - non-zero: the task was using preempt_v, but then took a trap within.
 *       Thus, the task does not own preempt_v. Any use of Vector will have
 *       to save preempt_v, if dirty, and fall back to non-preemptible
 *       kernel-mode Vector.
 *  - bit 29: the thread voluntarily calls schedule() while holding an active
 *    preempt_v. All preempt_v context should be dropped in that case because
 *    V-regs are caller-saved. Only sstatus.VS=ON is persisted across a
 *    schedule() call.
 *  - bit 30: the in-kernel preempt_v context is saved and must be restored
 *    when returning to the context that owns the preempt_v.
 *  - bit 31: the in-kernel preempt_v context is dirty, as signaled by the
 *    trap entry code. Any context switch out of the current task needs to
 *    save it to the task's in-kernel V context. Also, any trap nesting on
 *    top of preempt_v that requests to use V needs a save.
 */
#define RISCV_V_CTX_DEPTH_MASK		0x00ff0000

#define RISCV_V_CTX_UNIT_DEPTH		0x00010000
#define RISCV_KERNEL_MODE_V		0x00000001
#define RISCV_PREEMPT_V			0x00000100
#define RISCV_PREEMPT_V_DIRTY		0x80000000
#define RISCV_PREEMPT_V_NEED_RESTORE	0x40000000
#define RISCV_PREEMPT_V_IN_SCHEDULE	0x20000000
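
/*
 * Illustrative sketch (not kernel code): how the flag bits above compose.
 * Each trap taken while preempt_v is active adds one RISCV_V_CTX_UNIT_DEPTH,
 * so a zero depth means the task still owns the preempt_v context:
 *
 *	u32 flags = current->thread.riscv_v_flags;
 *
 *	if ((flags & RISCV_PREEMPT_V) && !(flags & RISCV_V_CTX_DEPTH_MASK))
 *		... current owns preempt_v: save/restore goes through
 *		    thread.kernel_vstate rather than a stack buffer ...
 */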

/* CPU-specific state of a task */
struct thread_struct {
	/* Callee-saved registers */
	unsigned long ra;
	unsigned long sp;		/* Kernel mode stack */
	unsigned long s[12];		/* s[0]: frame pointer */
	struct __riscv_d_ext_state fstate;
	unsigned long bad_cause;
	unsigned long envcfg;
	unsigned long sum;
	u32 riscv_v_flags;
	u32 vstate_ctrl;
	struct __riscv_v_ext_state vstate;
	unsigned long align_ctl;
	struct __riscv_v_ext_state kernel_vstate;
#ifdef CONFIG_SMP
	/* Flush the icache on migration */
	bool force_icache_flush;
	/* A forced icache flush is not needed if migrating to the previous cpu. */
	unsigned int prev_cpu;
#endif
};

/* Whitelist the fstate from the task_struct for hardened usercopy */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = offsetof(struct thread_struct, fstate);
	*size = sizeof_field(struct thread_struct, fstate);
}

#define INIT_THREAD {					\
	.sp		= sizeof(init_stack) + (long)&init_stack, \
	.align_ctl	= PR_UNALIGN_NOPRINT,		\
}

#define task_pt_regs(tsk)						\
	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)

#define PREFETCH_ASM(x)							\
	ALTERNATIVE(__nops(1), PREFETCH_R(x, 0), 0,			\
		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)

#define PREFETCHW_ASM(x)						\
	ALTERNATIVE(__nops(1), PREFETCH_W(x, 0), 0,			\
		    RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP)

#ifdef CONFIG_RISCV_ISA_ZICBOP
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
{
	__asm__ __volatile__(PREFETCH_ASM(%0) : : "r" (x) : "memory");
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *x)
{
	__asm__ __volatile__(PREFETCHW_ASM(%0) : : "r" (x) : "memory");
}
#endif /* CONFIG_RISCV_ISA_ZICBOP */
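
/*
 * Usage sketch (illustrative): callers issue these as free-standing hints
 * before touching memory, e.g. while walking a linked list. On cores
 * without Zicbop, the ALTERNATIVE above patches the prefetch.r/prefetch.w
 * instruction to a nop, so the hint costs nothing to issue:
 *
 *	prefetch(node->next);		(read expected soon)
 *	prefetchw(&node->refcount);	(write expected soon)
 */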

/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *regs,
			 unsigned long pc, unsigned long sp);

extern unsigned long __get_wchan(struct task_struct *p);

static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
}

extern phys_addr_t dma32_phys_limit;

struct device_node;
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);

extern void riscv_fill_hwcap(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

extern unsigned long signal_minsigstksz __ro_after_init;

#ifdef CONFIG_RISCV_ISA_V
/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
#define RISCV_V_SET_CONTROL(arg)	riscv_v_vstate_ctrl_set_current(arg)
#define RISCV_V_GET_CONTROL()		riscv_v_vstate_ctrl_get_current()
extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */
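
/*
 * Userspace usage sketch (illustrative) of the prctl(2) interface these
 * macros back, using the constants from <linux/prctl.h>:
 *
 *	prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
 *	long ctrl = prctl(PR_RISCV_V_GET_CONTROL);
 */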

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2)	riscv_set_icache_flush_ctx(arg1, arg2)
extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
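
/*
 * Userspace usage sketch (illustrative): a process that rewrites its own
 * code, e.g. a JIT, can take over icache maintenance with fence.i in
 * userspace via prctl(2):
 *
 *	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
 *	      PR_RISCV_SCOPE_PER_PROCESS);
 */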

#ifdef CONFIG_RISCV_ISA_SUPM
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
long get_tagged_addr_ctrl(struct task_struct *task);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(current, arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl(current)
#endif
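
/*
 * Userspace usage sketch (illustrative, assuming the PR_PMLEN_* pointer
 * masking constants from <linux/prctl.h>): request that at least 7 high
 * address bits be ignored on loads and stores:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | (7UL << PR_PMLEN_SHIFT), 0, 0, 0);
 */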

#endif /* __ASSEMBLER__ */

#endif /* _ASM_RISCV_PROCESSOR_H */