/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>

void debug_user_asce(int exit);

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif
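
/*
 * Note: defining INLINE_COPY_{FROM,TO}_USER lets the generic
 * include/linux/uaccess.h provide inline fast paths for
 * copy_{from,to}_user() on top of the raw_copy_*() routines above.
 * With KASAN the definitions are left out on purpose, so that copies go
 * through the out-of-line, instrumented library versions instead.
 */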

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
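
/*
 * Minimal usage sketch (illustrative only, not part of this file's API):
 * copy from user space while honoring storage-key protection, e.g. when
 * accessing memory on behalf of a guest. "buf", "uaddr", "len" and
 * "access_key" are hypothetical names.
 *
 *	unsigned long rem;
 *
 *	rem = copy_from_user_key(buf, uaddr, len, access_key);
 *	if (rem)
 *		return -EFAULT;	// "rem" bytes were not copied
 */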

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};
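
/*
 * The union above is the operand-access-control word loaded into general
 * register 0 for MVCOS (MOVE WITH OPTIONAL SPECIFICATIONS): oac1 controls
 * accesses to the first operand (the destination), oac2 accesses to the
 * second operand (the source). Per operand, "key" is the access key to
 * use, "as" selects the address space, and the "k" and "a" bits declare
 * the key and address-space fields valid. __put_user_asm() below sets
 * oac1 (user space is the destination), __get_user_asm() sets oac2
 * (user space is the source).
 */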

int __noreturn __put_user_bad(void);

#define __put_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac1.as = PSW_BITS_AS_SECONDARY,			\
		.oac1.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val)				\
		: "cc", "0");						\
	__rc;								\
})

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

int __noreturn __get_user_bad(void);

#define __get_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac2.as = PSW_BITS_AS_SECONDARY,			\
		.oac2.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	0(%[_to]),%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])	\
		EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])	\
		: [rc] "=&d" (__rc), "=Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val), [_to] "a" (to),		\
		  [_ksize] "K" (size)					\
		: "cc", "0");						\
	__rc;								\
})

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
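
/*
 * Minimal usage sketch (illustrative only): reading and modifying a
 * single value in user space, e.g. from a syscall or ioctl handler.
 * "argp" is a hypothetical int __user pointer; both macros return 0 on
 * success and -EFAULT on a fault.
 *
 *	int val;
 *
 *	if (get_user(val, argp))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, argp))
 *		return -EFAULT;
 */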

/*
 * Copy a null-terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
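
/*
 * Minimal usage sketch (illustrative only): clear_user() returns the
 * number of bytes that could not be zeroed, so any nonzero result means
 * a fault. "ubuf" and "len" are hypothetical names.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */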

void *s390_kernel_write(void *dst, const void *src, size_t size);

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:	" insn "  %[_val],%[_to]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=d" (__rc), [_to] "+Q" (*(to))			\
		: [_val] "d" (val)					\
		: "cc");						\
	__rc;								\
})

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	unsigned long __x = (unsigned long)(*((type *)(src)));		\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc");	\
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth");	\
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg");	\
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:	" insn "  %[_val],%[_from]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])		\
		EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])		\
		: [rc] "=d" (__rc), [_val] "=d" (val)			\
		: [_from] "Q" (*(from))					\
		: "cc");						\
	__rc;								\
})

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		unsigned char __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
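
/*
 * Minimal usage sketch (illustrative only): the *_nofault() macros above
 * are the arch hooks used by the generic copy_{from,to}_kernel_nofault()
 * helpers; on a fault they jump to the supplied error label instead of
 * returning an error code. "kptr" is a hypothetical kernel pointer.
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, kptr, long, Efault);
 *	return val;
 * Efault:
 *	return -EFAULT;
 */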

void __cmpxchg_user_key_called_with_bad_pointer(void);

#define CMPXCHG_USER_KEY_MAX_LOOPS 128
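
/*
 * Byte and halfword sized compare-and-exchange cannot be done directly
 * with COMPARE AND SWAP. The 1- and 2-byte cases below emulate it by
 * operating on the surrounding aligned word with CS, splicing the old
 * and new values in via shift and mask. If unrelated bytes within that
 * word keep changing concurrently, the CS loop has to retry;
 * CMPXCHG_USER_KEY_MAX_LOOPS bounds the retries so that user space
 * cannot keep the kernel spinning indefinitely, and -EAGAIN is returned
 * once the bound is hit.
 */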

static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
					      __uint128_t old, __uint128_t new,
					      unsigned long key, int size)
{
	int rc = 0;

	switch (size) {
	case 1: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		_old = ((unsigned int)old & 0xff) << shift;
		_new = ((unsigned int)new & 0xff) << shift;
		mask = ~(0xff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned char *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 2: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		_old = ((unsigned int)old & 0xffff) << shift;
		_new = ((unsigned int)new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned short *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 4:	{
		unsigned int prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cs	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" ((unsigned int)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned int *)uval = prev;
		return rc;
	}
	case 8: {
		unsigned long prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	csg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" ((unsigned long)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned long *)uval = prev;
		return rc;
	}
	case 16: {
		__uint128_t prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cdsg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(__int128_t *)address)
			: [new] "d" (new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(__uint128_t *)uval = prev;
		return rc;
	}
	}
	__cmpxchg_user_key_called_with_bad_pointer();
	return rc;
}

/**
 * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
 * @ptr: User space address of value to compare to @old and exchange with
 *	 @new. Must be aligned to sizeof(*@ptr).
 * @uval: Address where the old value of *@ptr is written to.
 * @old: Old value. Compared to the content pointed to by @ptr in order to
 *	 determine if the exchange occurs. The old value read from *@ptr is
 *	 written to *@uval.
 * @new: New value to place at *@ptr.
 * @key: Access key to use for checking storage key protection.
 *
 * Perform a cmpxchg on a user space target, honoring storage key protection;
 * @key alone determines how key checking is performed, and neither
 * storage-protection-override nor fetch-protection-override applies.
 * The caller must compare *@uval and @old to determine if values have been
 * exchanged. In case of an exception *@uval is set to zero.
 *
 * Return:     0: cmpxchg executed
 *	       -EFAULT: an exception happened when trying to access *@ptr
 *	       -EAGAIN: maxed out number of retries (byte and short only)
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
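
/*
 * Minimal usage sketch (illustrative only): atomically replace a byte in
 * user space, retrying both on -EAGAIN and when the value read differs
 * from the expected one. "uptr", "access_key" and "newval" are
 * hypothetical names.
 *
 *	unsigned char expected = 0, cur;
 *	int rc;
 *
 *	for (;;) {
 *		rc = cmpxchg_user_key(uptr, &cur, expected, newval, access_key);
 *		if (rc == -EFAULT)
 *			return rc;
 *		if (!rc && cur == expected)
 *			break;		// exchange took place
 *		expected = cur;		// retry with the value actually seen
 *	}
 */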

#endif /* __S390_UACCESS_H */