xref: /linux/arch/arm64/mm/gcs.c (revision 6fb44438a5e1897a72dd11139274735256be8069)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/gcs.h>
#include <asm/page.h>

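/*
 * Map a new guarded control stack (GCS) of @size bytes as a shadow
 * stack VMA.  A non-zero @addr is treated as a fixed hint via
 * MAP_FIXED_NOREPLACE.  Returns the mapped address or an error value
 * from do_mmap().
 */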
static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
{
	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
	struct mm_struct *mm = current->mm;
	unsigned long mapped_addr, unused;

	if (addr)
		flags |= MAP_FIXED_NOREPLACE;

	mmap_write_lock(mm);
	mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
			      VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
	mmap_write_unlock(mm);

	return mapped_addr;
}

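/*
 * Size of GCS to allocate: page-align a caller-supplied size, or pick
 * a default based on RLIMIT_STACK when @size is zero.
 */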
static unsigned long gcs_size(unsigned long size)
{
	if (size)
		return PAGE_ALIGN(size);

	/* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
	size = PAGE_ALIGN(min_t(unsigned long long,
				rlimit(RLIMIT_STACK) / 2, SZ_2G));
	return max(PAGE_SIZE, size);
}

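/*
 * Allocate a GCS for a newly created thread.  Only children created
 * with CLONE_VM and without CLONE_VFORK get a stack of their own;
 * other children simply inherit the current GCSPR_EL0.  Returns the
 * base of the new GCS, 0 if none was allocated, or an error value on
 * failure.
 */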
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
				     const struct kernel_clone_args *args)
{
	unsigned long addr, size;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(tsk))
		return 0;

	if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
		tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
		return 0;
	}

	size = args->stack_size / 2;

	size = gcs_size(size);
	addr = alloc_gcs(0, size);
	if (IS_ERR_VALUE(addr))
		return addr;

	tsk->thread.gcs_base = addr;
	tsk->thread.gcs_size = size;
	tsk->thread.gcspr_el0 = addr + size - sizeof(u64);

	return addr;
}

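/*
 * map_shadow_stack() - allocate a new GCS for userspace.  The mapping
 * is created with alloc_gcs() and, if SHADOW_STACK_SET_TOKEN is given,
 * a cap token is written near the top so the stack can later be
 * switched to; SHADOW_STACK_SET_MARKER additionally leaves an empty
 * top-of-stack slot above the token.  For example, userspace might
 * request a fresh stack with:
 *
 *	syscall(__NR_map_shadow_stack, 0, size,
 *		SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER);
 */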
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	unsigned long alloc_size;
	unsigned long __user *cap_ptr;
	unsigned long cap_val;
	int ret = 0;
	int cap_offset;

	if (!system_supports_gcs())
		return -EOPNOTSUPP;

	if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	if (size == 8 || !IS_ALIGNED(size, 8))
		return -EINVAL;

	/*
	 * An overflow would result in attempting to write the restore token
	 * to the wrong location. Not catastrophic, but just return the right
	 * error code and block it.
	 */
	alloc_size = PAGE_ALIGN(size);
	if (alloc_size < size)
		return -EOVERFLOW;

	addr = alloc_gcs(addr, alloc_size);
	if (IS_ERR_VALUE(addr))
		return addr;

	/*
	 * Put a cap token at the end of the allocated region so it
	 * can be switched to.
	 */
	if (flags & SHADOW_STACK_SET_TOKEN) {
		/* Leave an extra empty frame as a top of stack marker? */
		if (flags & SHADOW_STACK_SET_MARKER)
			cap_offset = 2;
		else
			cap_offset = 1;

		cap_ptr = (unsigned long __user *)(addr + size -
						   (cap_offset * sizeof(unsigned long)));
		cap_val = GCS_CAP(cap_ptr);

		put_user_gcs(cap_val, cap_ptr, &ret);
		if (ret != 0) {
			vm_munmap(addr, size);
			return -EFAULT;
		}

		/*
		 * Ensure the new cap is ordered before standard
		 * memory accesses to the same location.
		 */
		gcsb_dsync();
	}

	return addr;
}

/*
 * Apply the GCS mode configured for the specified task to the
 * hardware.
 */
void gcs_set_el0_mode(struct task_struct *task)
{
	u64 gcscre0_el1 = GCSCRE0_EL1_nTR;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
		gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
		gcscre0_el1 |= GCSCRE0_EL1_STREn;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
		gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn;

	write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1);
}

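/*
 * Unmap the kernel-allocated GCS for @task and clear the thread's GCS
 * state.  Nothing is done unless the task shares current's mm, so we
 * never touch mappings belonging to another address space.
 */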
void gcs_free(struct task_struct *task)
{
	if (!system_supports_gcs())
		return;

	if (!task->mm || task->mm != current->mm)
		return;

	if (task->thread.gcs_base)
		vm_munmap(task->thread.gcs_base, task->thread.gcs_size);

	task->thread.gcspr_el0 = 0;
	task->thread.gcs_base = 0;
	task->thread.gcs_size = 0;
}

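/*
 * prctl(PR_SET_SHADOW_STACK_STATUS) handler: validate the requested
 * GCS mode against the supported and locked bits, allocate a GCS the
 * first time GCS is enabled for the task, and apply the new mode to
 * the hardware when the target task is current.
 */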
int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
{
	unsigned long gcs, size;
	int ret;

	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/* Reject unknown flags */
	if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	ret = gcs_check_locked(task, arg);
	if (ret != 0)
		return ret;

	/* If we are enabling GCS then make sure we have a stack */
	if (arg & PR_SHADOW_STACK_ENABLE &&
	    !task_gcs_el0_enabled(task)) {
		/* Do not allow GCS to be reenabled */
		if (task->thread.gcs_base || task->thread.gcspr_el0)
			return -EINVAL;

		if (task != current)
			return -EBUSY;

		size = gcs_size(0);
		gcs = alloc_gcs(0, size);
		if (!gcs)
			return -ENOMEM;

		task->thread.gcspr_el0 = gcs + size - sizeof(u64);
		task->thread.gcs_base = gcs;
		task->thread.gcs_size = size;
		if (task == current)
			write_sysreg_s(task->thread.gcspr_el0,
				       SYS_GCSPR_EL0);
	}

	task->thread.gcs_el0_mode = arg;
	if (task == current)
		gcs_set_el0_mode(task);

	return 0;
}

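/*
 * prctl(PR_GET_SHADOW_STACK_STATUS) handler: report the task's current
 * GCS mode to userspace.
 */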
int arch_get_shadow_stack_status(struct task_struct *task,
				 unsigned long __user *arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	return put_user(task->thread.gcs_el0_mode, arg);
}

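/*
 * prctl(PR_LOCK_SHADOW_STACK_STATUS) handler: latch the given mode
 * bits so they can no longer be changed for this task.
 */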
int arch_lock_shadow_stack_status(struct task_struct *task,
				  unsigned long arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/*
	 * We support locking unknown bits so applications can prevent
	 * any changes in a future-proof manner.
	 */
	task->thread.gcs_el0_locked |= arg;

	return 0;
}