/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

static inline int __access_ok(const void __user *ptr, unsigned long size);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline int access_ok(const void __user *addr, unsigned long size)
{
	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	return likely(__access_ok(addr, size));
}
#define access_ok access_ok

#include <asm-generic/access_ok.h>
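
/*
 * Minimal usage sketch (hypothetical caller, not defined here): a driver
 * might validate a user range before using the raw accessors below, e.g.:
 *
 *	static int example_check(void __user *ubuf, size_t len)
 *	{
 *		return access_ok(ubuf, len) ? 0 : -EFAULT;
 *	}
 *
 * Note that copy_{from,to}_user() and {get,put}_user() already perform this
 * check; an open-coded access_ok() is only needed with the "__xxx" or
 * "unsafe_xxx" accessors.
 */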

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
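
/*
 * Illustrative sketch of the calling convention (mirrors the
 * raw_copy_{from,to}_user() wrappers later in this file): the enable/disable
 * helpers must bracket the user access, with no scheduling in between:
 *
 *	uaccess_ttbr0_enable();
 *	ret = __arch_copy_from_user(dst, __uaccess_mask_ptr(src), n);
 *	uaccess_ttbr0_disable();
 */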

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN));
}

static inline void uaccess_disable_privileged(void)
{
	mte_disable_tco();

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	mte_enable_tco();

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}
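
/*
 * Note the intentionally inverted naming between the layers: "enabling"
 * privileged uaccess means *clearing* PSTATE.PAN (or installing the user
 * TTBR0), so privileged loads/stores to user addresses are permitted only
 * between the two calls:
 *
 *	uaccess_enable_privileged();
 *	// privileged LDR/STR to user addresses now permitted
 *	uaccess_disable_privileged();
 */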

/*
 * Sanitize a uaccess pointer such that it cannot reach any kernel address.
 *
 * Clearing bit 55 ensures the pointer cannot address any portion of the TTBR1
 * address range (i.e. any kernel address), and either the pointer falls within
 * the TTBR0 address range or must cause a fault.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bic	%0, %1, %2\n"
	: "=r" (safe_ptr)
	: "r" (ptr),
	  "i" (BIT(55))
	);

	return safe_ptr;
}
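
/*
 * Worked example (values hypothetical): bit 55 selects between the TTBR0
 * and TTBR1 halves of the address space, so masking leaves user pointers
 * untouched and redirects kernel pointers to a faulting address:
 *
 *	__uaccess_mask_ptr(0x0000aaaabbbb0000)	-> unchanged (user address)
 *	__uaccess_mask_ptr(0xffff800012345678)	-> 0xff7f800012345678 (faults)
 *
 * The BIC is a plain data dependency rather than a conditional branch, so a
 * mispredicted branch cannot speculatively bypass the sanitization.
 */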

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_mem_asm(load, reg, x, addr, label, type)			\
	asm_goto_output(						\
	"1:	" load "	" reg "0, [%1]\n"			\
	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
	: "=r" (x)							\
	: "r" (addr) : : label)
#else
#define __get_mem_asm(load, reg, x, addr, label, type) do {		\
	int __gma_err = 0;						\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
	: "+r" (__gma_err), "=r" (x)					\
	: "r" (addr));							\
	if (__gma_err) goto label; } while (0)
#endif

#define __raw_get_mem(ldr, x, ptr, label, type)					\
do {										\
	unsigned long __gu_val;							\
	switch (sizeof(*(ptr))) {						\
	case 1:									\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);	\
		break;								\
	case 2:									\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);	\
		break;								\
	case 4:									\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);		\
		break;								\
	case 8:									\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type);		\
		break;								\
	default:								\
		BUILD_BUG();							\
	}									\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, label)					\
do {									\
	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
	__typeof__(x) __rgu_val;					\
	__chk_user_ptr(ptr);						\
	do {								\
		__label__ __rgu_failed;					\
		uaccess_ttbr0_enable();					\
		__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);	\
		uaccess_ttbr0_disable();				\
		(x) = __rgu_val;					\
		break;							\
	__rgu_failed:							\
		uaccess_ttbr0_disable();				\
		goto label;						\
	} while (0);							\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__label__ __gu_failed;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, __gu_failed);			\
	} else {							\
	__gu_failed:							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
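
/*
 * Minimal usage sketch (hypothetical caller, not defined here): reading a
 * single word from userspace, with the access_ok() check, pointer masking
 * and PAN toggling all handled internally:
 *
 *	static int example_get_flags(const u32 __user *uptr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */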

/*
 * We must not call into the scheduler between __mte_enable_tco_async() and
 * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __gkn_dst = (dst);				\
	__typeof__(src) __gkn_src = (src);				\
	do {								\
		__label__ __gkn_label;					\
									\
		__mte_enable_tco_async();				\
		__raw_get_mem("ldr", *((type *)(__gkn_dst)),		\
			      (__force type *)(__gkn_src), __gkn_label, K);	\
		__mte_disable_tco_async();				\
		break;							\
	__gkn_label:							\
		__mte_disable_tco_async();				\
		goto err_label;						\
	} while (0);							\
} while (0)
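
/*
 * Illustrative note: this hook backs the generic copy_from_kernel_nofault()
 * helper, which is the usual way callers reach it, e.g.:
 *
 *	unsigned long val;
 *
 *	if (copy_from_kernel_nofault(&val, kaddr, sizeof(val)))
 *		// kaddr was not a readable kernel address
 */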

#define __put_mem_asm(store, reg, x, addr, label, type)			\
	asm goto(							\
	"1:	" store "	" reg "0, [%1]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
	: : "rZ" (x), "r" (addr) : : label)

#define __raw_put_mem(str, x, ptr, label, type)					\
do {										\
	__typeof__(*(ptr)) __pu_val = (x);					\
	switch (sizeof(*(ptr))) {						\
	case 1:									\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type);	\
		break;								\
	case 2:									\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type);	\
		break;								\
	case 4:									\
		__put_mem_asm(str, "%w", __pu_val, (ptr), label, type);		\
		break;								\
	case 8:									\
		__put_mem_asm(str, "%x", __pu_val, (ptr), label, type);		\
		break;								\
	default:								\
		BUILD_BUG();							\
	}									\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, label)					\
do {									\
	__label__ __rpu_failed;						\
	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
	__typeof__(*(ptr)) __rpu_val = (x);				\
	__chk_user_ptr(__rpu_ptr);					\
									\
	do {								\
		uaccess_ttbr0_enable();					\
		__raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U);	\
		uaccess_ttbr0_disable();				\
		break;							\
	__rpu_failed:							\
		uaccess_ttbr0_disable();				\
		goto label;						\
	} while (0);							\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__label__ __pu_failed;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, __pu_failed);			\
	} else {							\
	__pu_failed:							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
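
/*
 * Minimal usage sketch (hypothetical caller, not defined here): the mirror
 * of the get_user() example above, writing a single word to userspace:
 *
 *	static int example_put_status(u32 __user *uptr, u32 status)
 *	{
 *		return put_user(status, uptr) ? -EFAULT : 0;
 *	}
 */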

/*
 * We must not call into the scheduler between __mte_enable_tco_async() and
 * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __pkn_dst = (dst);				\
	__typeof__(src) __pkn_src = (src);				\
									\
	do {								\
		__label__ __pkn_err;					\
		__mte_enable_tco_async();				\
		__raw_put_mem("str", *((type *)(__pkn_src)),		\
			      (__force type *)(__pkn_dst), __pkn_err, K);	\
		__mte_disable_tco_async();				\
		break;							\
	__pkn_err:							\
		__mte_disable_tco_async();				\
		goto err_label;						\
	} while (0);							\
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				__uaccess_mask_ptr(from), (n));		\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				(from), (n));				\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})
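
/*
 * Illustrative note: these raw helpers back the generic
 * copy_{from,to}_user(), which add the access_ok() check and return the
 * number of bytes *not* copied, e.g.:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;	// not all 'len' bytes were copied
 */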

static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	uaccess_ttbr0_enable();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	uaccess_ttbr0_disable()
#define arch_unsafe_put_user(x, ptr, label) \
	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
#define arch_unsafe_get_user(x, ptr, label) \
	__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
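
/*
 * Minimal usage sketch (hypothetical caller, not defined here): the unsafe
 * accessors amortise one access_ok() check and one PAN toggle over several
 * accesses:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr[0], efault);
 *	unsafe_put_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */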

/*
 * KCSAN uses these to save and restore ttbr state.
 * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
 * they are no-ops.
 */
static inline unsigned long user_access_save(void) { return 0; }
static inline void user_access_restore(unsigned long enabled) { }

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src), (type __user *)(dst), label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
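
/*
 * Worked example: running the four loops in descending size order copies
 * with the widest possible accesses. A hypothetical 15-byte copy becomes
 * one u64 store, one u32, one u16, then one u8 (8 + 4 + 2 + 1 = 15).
 */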

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user
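
/*
 * Minimal usage sketch (hypothetical caller, not defined here): like the
 * copy helpers, clear_user() returns the number of bytes *not* zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */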

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
static inline size_t probe_subpage_writeable(const char __user *uaddr,
					     size_t size)
{
	if (!system_supports_mte())
		return 0;
	return mte_probe_user_range(uaddr, size);
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
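
/*
 * Illustrative note: with MTE, a page that is writable at the PTE level may
 * still fault on a tag check within it; the generic
 * fault_in_subpage_writeable() uses this hook to probe at sub-page (tag)
 * granularity rather than page granularity.
 */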

#endif /* __ASM_UACCESS_H */