/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <linux/sizes.h>

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
#include <asm/asm-compat.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#endif

/* Threshold above which the VMX copy path is used */
#define VMX_COPY_THRESHOLD	3328

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 */
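/*
 * Illustrative example (not part of this header): typical use of the
 * checking variants from a syscall, where 'uarg' is a hypothetical
 * int __user * argument.
 *
 *	int val;
 *
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uarg))
 *		return -EFAULT;
 *
 * The __get_user()/__put_user() forms skip access_ok() and may only be
 * used once the address range has already been checked.
 */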
#define __put_user(x, ptr) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
	\
	might_fault(); \
	do { \
		__label__ __pu_failed; \
		\
		allow_user_access(__pu_addr, KUAP_WRITE); \
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
		prevent_user_access(KUAP_WRITE); \
		__pu_err = 0; \
		break; \
		\
__pu_failed: \
		prevent_user_access(KUAP_WRITE); \
		__pu_err = -EFAULT; \
	} while (0); \
	\
	__pu_err; \
})

#define put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *_pu_addr = (ptr); \
	\
	access_ok(_pu_addr, sizeof(*(ptr))) ? \
		__put_user(x, _pu_addr) : -EFAULT; \
})

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op) \
	asm goto( \
		"1: " op " %0,0(%1)	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), "b" (addr) \
		: \
		: label)
#else
#define __put_user_asm_goto(x, addr, label, op) \
	asm goto( \
		"1: " op "%U1%X1 %0,%1	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), "m<>" (*addr) \
		: \
		: label)
#endif

#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
	__put_user_asm_goto(x, ptr, label, "std")
#else
#define __put_user_asm2_goto(x, addr, label) \
	asm goto ("1: std%U1%X1 %0,%1	# put_user\n" \
		EX_TABLE(1b, %l2) \
		: \
		: "r" (x), DS_FORM_CONSTRAINT (*addr) \
		: \
		: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
	asm goto( \
		"1: stw%X1 %0, %1\n" \
		"2: stw%X1 %L0, %L1\n" \
		EX_TABLE(1b, %l2) \
		EX_TABLE(2b, %l2) \
		: \
		: "r" (x), "m" (*addr) \
		: \
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __user *__pus_addr = (ptr); \
	\
	switch (size) { \
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
	default: BUILD_BUG(); \
	} \
} while (0)
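
/*
 * Illustrative sketch of how the goto-based helper is consumed: the
 * label receives control if the store faults, via the exception table
 * entry emitted next to the instruction, so no error register needs to
 * be threaded through. 'store_int' is a hypothetical helper; callers
 * must have opened a KUAP write window first, as __put_user() does
 * with allow_user_access().
 *
 *	static int store_int(int __user *p, int v)
 *	{
 *		__put_user_size_goto(v, p, sizeof(*p), failed);
 *		return 0;
 *	failed:
 *		return -EFAULT;
 *	}
 */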

/*
 * This does an atomic, 16-byte-aligned 128-bit load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
	__asm__ __volatile__( \
		".machine push\n" \
		".machine altivec\n" \
		"1: lvx  0,0,%1	# get user\n" \
		"   stvx 0,0,%2	# put kernel\n" \
		".machine pop\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		"   b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err) \
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op) \
	asm_goto_output( \
		"1: "op" %0,0(%1)	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: "b" (addr) \
		: \
		: label)
#else
#define __get_user_asm_goto(x, addr, label, op) \
	asm_goto_output( \
		"1: "op"%U1%X1 %0, %1	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: "m<>" (*addr) \
		: \
		: label)
#endif

#ifdef __powerpc64__
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm2_goto(x, addr, label) \
	__get_user_asm_goto(x, addr, label, "ld")
#else
#define __get_user_asm2_goto(x, addr, label) \
	asm_goto_output( \
		"1: ld%U1%X1 %0, %1	# get_user\n" \
		EX_TABLE(1b, %l2) \
		: "=r" (x) \
		: DS_FORM_CONSTRAINT (*addr) \
		: \
		: label)
#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
	asm_goto_output( \
		"1: lwz%X1 %0, %1\n" \
		"2: lwz%X1 %L0, %L1\n" \
		EX_TABLE(1b, %l2) \
		EX_TABLE(2b, %l2) \
		: "=&r" (x) \
		: "m" (*addr) \
		: \
		: label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label) \
do { \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	__label__ __gus_failed; \
	\
	__get_user_size_goto(x, ptr, size, __gus_failed); \
	retval = 0; \
	break; \
__gus_failed: \
	x = 0; \
	retval = -EFAULT; \
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"1: "op"%U2%X2 %1, %2	# get_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: li %0,%3\n" \
		"   li %1,0\n" \
		"   b 2b\n" \
		".previous\n" \
		EX_TABLE(1b, 3b) \
		: "=r" (err), "=r" (x) \
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1: lwz%X2 %1, %2\n" \
		"2: lwz%X2 %L1, %L2\n" \
		"3:\n" \
		".section .fixup,\"ax\"\n" \
		"4: li %0,%3\n" \
		"   li %1,0\n" \
		"   li %L1,0\n" \
		"   b 3b\n" \
		".previous\n" \
		EX_TABLE(1b, 4b) \
		EX_TABLE(2b, 4b) \
		: "=r" (err), "=&r" (x) \
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
	retval = 0; \
	BUILD_BUG_ON(size > sizeof(x)); \
	switch (size) { \
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
	default: x = 0; BUILD_BUG(); \
	} \
} while (0)

#define __get_user_size_goto(x, ptr, size, label) \
do { \
	long __gus_retval; \
	\
	__get_user_size_allowed(x, ptr, size, __gus_retval); \
	if (__gus_retval) \
		goto label; \
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
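
/*
 * Illustrative examples of what __long_type() evaluates to:
 *
 *	__long_type(*(u8 *)p)	-> unsigned long
 *	__long_type(*(u64 *)p)	-> unsigned long on 64-bit,
 *				   unsigned long long on 32-bit
 *
 * Using one wide scratch type lets the helpers above store into a
 * single variable regardless of the access size.
 */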

#define __get_user(x, ptr) \
({ \
	long __gu_err; \
	__long_type(*(ptr)) __gu_val; \
	__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
	\
	might_fault(); \
	barrier_nospec(); \
	allow_user_access(NULL, KUAP_READ); \
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	prevent_user_access(KUAP_READ); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	\
	__gu_err; \
})

#define get_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __user *_gu_addr = (ptr); \
	\
	access_ok(_gu_addr, sizeof(*(ptr))) ? \
		__get_user(x, _gu_addr) : \
		((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

unsigned long __copy_tofrom_user_base(void __user *to,
		const void __user *from, unsigned long size);

unsigned long __copy_tofrom_user_power7_vmx(void __user *to,
		const void __user *from, unsigned long size);

static __always_inline bool will_use_vmx(unsigned long n)
{
	return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) &&
		n > VMX_COPY_THRESHOLD;
}

static __always_inline unsigned long
raw_copy_tofrom_user(void __user *to, const void __user *from,
		unsigned long n, unsigned long dir)
{
	unsigned long ret;

	if (will_use_vmx(n) && enter_vmx_usercopy()) {
		allow_user_access(to, dir);
		ret = __copy_tofrom_user_power7_vmx(to, from, n);
		prevent_user_access(dir);
		exit_vmx_usercopy();

		if (unlikely(ret)) {
			allow_user_access(to, dir);
			ret = __copy_tofrom_user_base(to, from, n);
			prevent_user_access(dir);
		}
		return ret;
	}

	allow_user_access(to, dir);
	ret = __copy_tofrom_user(to, from, n);
	prevent_user_access(dir);
	return ret;
}

#ifdef CONFIG_PPC64
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	barrier_nospec();
	return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE);
}
#endif /* CONFIG_PPC64 */

static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE);
}
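
/*
 * These return the number of bytes NOT copied (0 on full success),
 * the convention the generic copy_{to,from}_user() wrappers rely on.
 * A minimal illustrative caller, assuming the generic wrappers from
 * <linux/uaccess.h> and hypothetical 'uptr'/'kbuf' buffers:
 *
 *	if (copy_to_user(uptr, kbuf, len))
 *		return -EFAULT;
 */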

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	might_fault();
	allow_user_access(addr, KUAP_WRITE);
	ret = __arch_clear_user(addr, size);
	prevent_user_access(KUAP_WRITE);
	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
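
/*
 * Like the copy helpers, clear_user() returns the number of bytes
 * left unzeroed. Illustrative use ('uptr' hypothetical):
 *
 *	if (clear_user(uptr, len))
 *		return -EFAULT;
 */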

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true)) {
		if (access_ok(to, n)) {
			allow_user_access(to, KUAP_WRITE);
			n = copy_mc_generic((void __force *)to, from, n);
			prevent_user_access(KUAP_WRITE);
		}
	}

	return n;
}
#endif

extern size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size);

static __must_check __always_inline bool __user_access_begin(const void __user *ptr, size_t len,
							     unsigned long dir)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	if (dir & KUAP_READ)
		barrier_nospec();
	allow_user_access((void __user *)ptr, dir);
	return true;
}

#define user_access_begin(p, l)		__user_access_begin(p, l, KUAP_READ_WRITE)
#define user_read_access_begin(p, l)	__user_access_begin(p, l, KUAP_READ)
#define user_write_access_begin(p, l)	__user_access_begin(p, l, KUAP_WRITE)

#define user_access_end()		prevent_user_access(KUAP_READ_WRITE)
#define user_read_access_end()		prevent_user_access(KUAP_READ)
#define user_write_access_end()		prevent_user_access(KUAP_WRITE)

#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access
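
/*
 * Illustrative sketch of the begin/end protocol with the generic
 * unsafe_* accessors from <linux/uaccess.h> ('uptr' hypothetical):
 *
 *	if (!user_read_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, out);
 *	user_read_access_end();
 *	return 0;
 * out:
 *	user_read_access_end();
 *	return -EFAULT;
 */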

/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */
static inline void __user *mask_user_address_simple(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long mask = (unsigned long)(((long)addr >> (BITS_PER_LONG - 1)) & LONG_MAX);

	return (void __user *)(addr & ~mask);
}
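
/*
 * Worked example of the branchless mask above, on 64-bit: the
 * arithmetic shift replicates the sign bit, so for a kernel-range
 * pointer such as 0xc000000000000000 the mask becomes LONG_MAX and
 * addr & ~mask leaves only the top bit set (0x8000000000000000),
 * which is guaranteed to fault on user access. A pointer with the
 * top bit clear yields mask == 0 and passes through unchanged.
 */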

static inline void __user *mask_user_address_isel(const void __user *ptr)
{
	unsigned long addr;

	asm("cmplw %1, %2; iselgt %0, %2, %1" : "=r"(addr) : "r"(ptr), "r"(TASK_SIZE) : "cr0");

	return (void __user *)addr;
}
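
/*
 * Illustrative C equivalent of the isel sequence above (minus its
 * branch-free guarantee):
 *
 *	addr = ((unsigned long)ptr > TASK_SIZE) ? TASK_SIZE
 *						: (unsigned long)ptr;
 *
 * cmplw sets cr0 from an unsigned compare, and iselgt selects
 * TASK_SIZE when ptr is greater, so no conditional branch is emitted.
 */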

/* TASK_SIZE is a multiple of 128K, so the low 17 bits can be shifted away */
static inline void __user *mask_user_address_32(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long mask = (unsigned long)((long)((TASK_SIZE >> 17) - 1 - (addr >> 17)) >> 31);

	addr = (addr & ~mask) | (TASK_SIZE & mask);

	return (void __user *)addr;
}
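
/*
 * Worked example of the 32-bit mask, assuming TASK_SIZE = 0x80000000:
 * for addr < TASK_SIZE, (TASK_SIZE >> 17) - 1 - (addr >> 17) is
 * non-negative, the sign-extending shift by 31 yields mask == 0 and
 * addr is unchanged; for addr >= TASK_SIZE the value goes negative,
 * mask becomes all-ones and the result is clamped to TASK_SIZE.
 */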

static inline void __user *mask_user_address_fallback(const void __user *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (void __user *)(likely(addr < TASK_SIZE) ? addr : TASK_SIZE);
}

static inline void __user *mask_user_address(const void __user *ptr)
{
#ifdef MODULES_VADDR
	const unsigned long border = MODULES_VADDR;
#else
	const unsigned long border = PAGE_OFFSET;
#endif

	if (IS_ENABLED(CONFIG_PPC64))
		return mask_user_address_simple(ptr);
	if (IS_ENABLED(CONFIG_E500))
		return mask_user_address_isel(ptr);
	if (TASK_SIZE <= UL(SZ_2G) && border >= UL(SZ_2G))
		return mask_user_address_simple(ptr);
	if (IS_ENABLED(CONFIG_PPC_BARRIER_NOSPEC))
		return mask_user_address_32(ptr);
	return mask_user_address_fallback(ptr);
}

static __always_inline void __user *__masked_user_access_begin(const void __user *p,
							       unsigned long dir)
{
	void __user *ptr = mask_user_address(p);

	might_fault();
	allow_user_access(ptr, dir);

	return ptr;
}

#define masked_user_access_begin(p)		__masked_user_access_begin(p, KUAP_READ_WRITE)
#define masked_user_read_access_begin(p)	__masked_user_access_begin(p, KUAP_READ)
#define masked_user_write_access_begin(p)	__masked_user_access_begin(p, KUAP_WRITE)
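
/*
 * Illustrative sketch of the masked variant: the caller uses the
 * returned (possibly clamped) pointer instead of testing a boolean,
 * so no conditional speculation fence is needed ('uptr' hypothetical):
 *
 *	int __user *p = masked_user_read_access_begin(uptr);
 *
 *	unsafe_get_user(val, p, out);
 *	user_read_access_end();
 *	return 0;
 * out:
 *	user_read_access_end();
 *	return -EFAULT;
 */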

#define arch_unsafe_get_user(x, p, e) do { \
	__long_type(*(p)) __gu_val; \
	__typeof__(*(p)) __user *__gu_addr = (p); \
	\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
	(x) = (__typeof__(*(p)))__gu_val; \
} while (0)

#define arch_unsafe_put_user(x, p, e) \
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)

#define unsafe_copy_from_user(d, s, l, e) \
do { \
	u8 *_dst = (u8 *)(d); \
	const u8 __user *_src = (const u8 __user *)(s); \
	size_t _len = (l); \
	int _i; \
	\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
	if (_len & 4) { \
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
} while (0)
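
/*
 * Copy-size breakdown, for illustration: a 15-byte copy performs one
 * u64 access for bytes 0-7, then one u32 (bytes 8-11), one u16
 * (12-13) and one u8 (14), descending through the tail by testing
 * each size bit of the length. The same shape is used for the store
 * direction below.
 */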

#define unsafe_copy_to_user(d, s, l, e) \
do { \
	u8 __user *_dst = (u8 __user *)(d); \
	const u8 *_src = (const u8 *)(s); \
	size_t _len = (l); \
	int _i; \
	\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) { \
		unsafe_put_user(*(u32 *)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4; \
	} \
	if (_len & 2) { \
		unsafe_put_user(*(u16 *)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2; \
	} \
	if (_len & 1) \
		unsafe_put_user(*(u8 *)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

#define arch_get_kernel_nofault(dst, src, type, err_label) \
	__get_user_size_goto(*((type *)(dst)), \
		(__force type __user *)(src), sizeof(type), err_label)

#define arch_put_kernel_nofault(dst, src, type, err_label) \
	__put_user_size_goto(*((type *)(src)), \
		(__force type __user *)(dst), sizeof(type), err_label)

#endif /* _ARCH_POWERPC_UACCESS_H */