/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/pgtable.h>
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>
#include <asm/asce.h>
#include <linux/instrumented.h>

void debug_user_asce(int exit);

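/*
 * With KMSAN the uaccess primitives below are kept out of line and excluded
 * from memory sanitization, since they access user space directly; without
 * KMSAN they are always inlined.
 */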
#ifdef CONFIG_KMSAN
#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
#define uaccess_kmsan_or_inline __always_inline
#endif

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

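/*
 * raw_copy_from_user() and raw_copy_to_user() are implemented with the
 * MVCOS (move with optional specifications) instruction: register 0 is
 * loaded with an operand access control of 0x81 for the user space side
 * of the copy, so that this operand is accessed via the secondary (user)
 * address space. MVCOS moves at most 4096 bytes per execution and sets
 * condition code zero once the requested length has been copied, so the
 * loop below advances in 4096 byte steps until the copy is complete or a
 * fault is handled via the exception table. The return value is the
 * number of bytes that could not be copied; for constant sizes of up to
 * 4096 bytes a single execution suffices and the loop is optimized away.
 */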
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		asm_inline volatile(
			"	lhi	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_FROM(0b, 0b)
			EX_TABLE_UA_MVCOS_FROM(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char __user *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}

static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		asm_inline volatile(
			"	llilh	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
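
/*
 * Illustrative usage sketch (write_buf_with_key() and its arguments are
 * hypothetical): copy a kernel buffer to user space while performing
 * storage key protection checks with a caller supplied access key.
 *
 *	static int write_buf_with_key(void __user *uptr, const void *buf,
 *				      unsigned long len, unsigned long key)
 *	{
 *		if (copy_to_user_key(uptr, buf, len, key))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * As with copy_to_user(), a non-zero return value is the number of bytes
 * that could not be copied.
 */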

int __noreturn __put_user_bad(void);

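/*
 * Two flavors of the uninstrumented __put_user_*_noinstr() helpers exist:
 * if the compiler supports output operands in asm goto statements, a fault
 * branches directly to the Efault label; otherwise the exception table
 * fixup writes an error code into the rc output operand.
 */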
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "+Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "+Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_PUT_USER_NOINSTR(char);
DEFINE_PUT_USER_NOINSTR(short);
DEFINE_PUT_USER_NOINSTR(int);
DEFINE_PUT_USER_NOINSTR(long);

#define DEFINE_PUT_USER(type)						\
static __always_inline int						\
__put_user_##type(unsigned type __user *to, unsigned type *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __put_user_##type##_noinstr(to, from, size);		\
	instrument_put_user(*from, to, size);				\
	return rc;							\
}

DEFINE_PUT_USER(char);
DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long);

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __prc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__prc = __put_user_char((unsigned char __user *)(ptr),	\
					(unsigned char *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	case 2:								\
		__prc = __put_user_short((unsigned short __user *)(ptr),\
					 (unsigned short *)&__x,	\
					 sizeof(*(ptr)));		\
		break;							\
	case 4:								\
		__prc = __put_user_int((unsigned int __user *)(ptr),	\
				       (unsigned int *)&__x,		\
				       sizeof(*(ptr)));			\
		break;							\
	case 8:								\
		__prc = __put_user_long((unsigned long __user *)(ptr),	\
					(unsigned long *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	default:							\
		__prc = __put_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__prc, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})
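
/*
 * Illustrative usage sketch, with uptr being a hypothetical int __user *
 * supplied by user space (e.g. an ioctl argument):
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */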

int __noreturn __get_user_bad(void);

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "=Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	*to = 0;							\
	return -EFAULT;							\
}

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "=Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	if (likely(!rc))						\
		return 0;						\
	*to = 0;							\
	return rc;							\
}

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

DEFINE_GET_USER_NOINSTR(char);
DEFINE_GET_USER_NOINSTR(short);
DEFINE_GET_USER_NOINSTR(int);
DEFINE_GET_USER_NOINSTR(long);

#define DEFINE_GET_USER(type)						\
static __always_inline int						\
__get_user_##type(unsigned type *to, const unsigned type __user *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __get_user_##type##_noinstr(to, from, size);		\
	instrument_get_user(*to);					\
	return rc;							\
}

DEFINE_GET_USER(char);
DEFINE_GET_USER(short);
DEFINE_GET_USER(int);
DEFINE_GET_USER(long);

#define __get_user(x, ptr)						\
({									\
	const __user void *____guptr = (ptr);				\
	int __grc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		const unsigned char __user *__guptr = ____guptr;	\
		unsigned char __x;					\
									\
		__grc = __get_user_char(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		const unsigned short __user *__guptr = ____guptr;	\
		unsigned short __x;					\
									\
		__grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		const unsigned int __user *__guptr = ____guptr;		\
		unsigned int __x;					\
									\
		__grc = __get_user_int(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		const unsigned long __user *__guptr = ____guptr;	\
		unsigned long __x;					\
									\
		__grc = __get_user_long(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__grc = __get_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__grc, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
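
/*
 * Illustrative usage sketch, with uptr being a hypothetical int __user *
 * supplied by user space. On a fault the destination is zeroed and
 * -EFAULT is returned.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */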

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);

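/*
 * __clear_user() zeroes user space memory by using MVCOS to copy from
 * empty_zero_page, advancing in 4096 byte steps like raw_copy_to_user().
 * The return value is the number of bytes that could not be cleared.
 */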
static uaccess_kmsan_or_inline __must_check unsigned long
__clear_user(void __user *to, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		asm_inline volatile(
			"	llilh	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page)
			: CC_CLOBBER_LIST("memory", "0"));
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (CC_TRANSFORM(cc) == 0)
			return osize - size;
		size -= 4096;
		to += 4096;
	}
}

static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

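/*
 * s390_kernel_write() is meant for writes to kernel memory that may be
 * mapped read-only (e.g. for code patching). The decompressor runs without
 * such protection, so a plain memcpy() is sufficient there; the kernel
 * proper uses the out of line __s390_kernel_write().
 */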
void *__s390_kernel_write(void *dst, const void *src, size_t size);

static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	if (__is_defined(__DECOMPRESSOR))
		return memcpy(dst, src, size);
	return __s390_kernel_write(dst, src, size);
}

void __noreturn __mvc_kernel_nofault_bad(void);

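/*
 * __get_kernel_nofault() and __put_kernel_nofault() are both backed by
 * __mvc_kernel_nofault(): a single MVC of 1, 2, 4 or 8 bytes between
 * kernel addresses, with exception table entries so that a fault branches
 * to err_label instead of generating an oops.
 */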
#if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)

#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm goto(						\
			"0:	mvc	%O[_dst](%[_len],%R[_dst]),%[_src]\n" \
			"1:	nopr	%%r7\n"				\
			EX_TABLE(0b, %l[err_label])			\
			EX_TABLE(1b, %l[err_label])			\
			: [_dst] "=Q" (*(type *)dst)			\
			: [_src] "Q" (*(type *)(src)),			\
			  [_len] "I" (sizeof(type))			\
			:						\
			: err_label);					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *(__dst) = (type *)(dst);					\
	int __rc;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm_inline volatile(					\
			"0:	mvc	0(%[_len],%[_dst]),%[_src]\n"	\
			"1:	lhi	%[_rc],0\n"			\
			"2:\n"						\
			EX_TABLE_UA_FAULT(0b, 2b, %[_rc])		\
			EX_TABLE_UA_FAULT(1b, 2b, %[_rc])		\
			: [_rc] "=d" (__rc),				\
			  "=m" (*__dst)					\
			: [_src] "Q" (*(type *)(src)),			\
			  [_dst] "a" (__dst),				\
			  [_len] "I" (sizeof(type)));			\
		if (__rc)						\
			goto err_label;					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */

#define __get_kernel_nofault __mvc_kernel_nofault
#define __put_kernel_nofault __mvc_kernel_nofault

void __cmpxchg_user_key_called_with_bad_pointer(void);

#define CMPXCHG_USER_KEY_MAX_LOOPS 128

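/*
 * __cmpxchg_user_key() performs the compare-and-swap with the PSW key set
 * to the caller supplied access key while the CPU is temporarily switched
 * (spka/sacf pairs) so that the target is accessed in the user address
 * space. The 1 and 2 byte cases are emulated with a 4 byte compare-and-swap
 * on the containing aligned word; if the surrounding bytes keep changing,
 * the retry loop gives up after CMPXCHG_USER_KEY_MAX_LOOPS iterations and
 * -EAGAIN is returned.
 */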
static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
					      __uint128_t old, __uint128_t new,
					      unsigned long key, int size)
{
	bool sacf_flag;
	int rc = 0;

	switch (size) {
	case 1: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		_old = ((unsigned int)old & 0xff) << shift;
		_new = ((unsigned int)new & 0xff) << shift;
		mask = ~(0xff << shift);
		sacf_flag = enable_sacf_uaccess();
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		disable_sacf_uaccess(sacf_flag);
		*(unsigned char *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 2: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		_old = ((unsigned int)old & 0xffff) << shift;
		_new = ((unsigned int)new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		sacf_flag = enable_sacf_uaccess();
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		disable_sacf_uaccess(sacf_flag);
		*(unsigned short *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 4: {
		unsigned int prev = old;

		sacf_flag = enable_sacf_uaccess();
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cs	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" ((unsigned int)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		disable_sacf_uaccess(sacf_flag);
		*(unsigned int *)uval = prev;
		return rc;
	}
	case 8: {
		unsigned long prev = old;

		sacf_flag = enable_sacf_uaccess();
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	csg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" ((unsigned long)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		disable_sacf_uaccess(sacf_flag);
		*(unsigned long *)uval = prev;
		return rc;
	}
	case 16: {
		__uint128_t prev = old;

		sacf_flag = enable_sacf_uaccess();
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cdsg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(__int128_t *)address)
			: [new] "d" (new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		disable_sacf_uaccess(sacf_flag);
		*(__uint128_t *)uval = prev;
		return rc;
	}
	}
	__cmpxchg_user_key_called_with_bad_pointer();
	return rc;
}

/**
 * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
 * @ptr: User space address of value to compare to @old and exchange with
 *	 @new. Must be aligned to sizeof(*@ptr).
 * @uval: Address where the old value of *@ptr is written to.
 * @old: Old value. Compared to the content pointed to by @ptr in order to
 *	 determine if the exchange occurs. The old value read from *@ptr is
 *	 written to *@uval.
 * @new: New value to place at *@ptr.
 * @key: Access key to use for checking storage key protection.
 *
 * Perform a cmpxchg on a user space target, honoring storage key protection.
 * @key alone determines how key checking is performed, neither
 * storage-protection-override nor fetch-protection-override apply.
 * The caller must compare *@uval and @old to determine if values have been
 * exchanged. In case of an exception *@uval is set to zero.
 *
 * Return:     0: cmpxchg executed
 *	       -EFAULT: an exception happened when trying to access *@ptr
 *	       -EAGAIN: maxed out number of retries (byte and short only)
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
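
/*
 * Illustrative usage sketch (uptr, old, new and key are hypothetical
 * variables of the caller): atomically replace a 4 byte value in user
 * space while honoring storage key protection.
 *
 *	u32 uval;
 *	int rc;
 *
 *	rc = cmpxchg_user_key(uptr, &uval, old, new, key);
 *	if (rc)
 *		return rc;
 *	if (uval != old)
 *		goto retry;
 *
 * A non-zero rc indicates a fault or too many retries; if uval differs
 * from old the compare failed and uval contains the value read from
 * user space.
 */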

#endif /* __S390_UACCESS_H */