1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com),
6  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
7  *
8  *  Derived from "include/asm-i386/uaccess.h"
9  */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
12 
13 /*
14  * User space memory access functions
15  */
16 #include <linux/pgtable.h>
17 #include <asm/asm-extable.h>
18 #include <asm/processor.h>
19 #include <asm/extable.h>
20 #include <asm/facility.h>
21 #include <asm-generic/access_ok.h>
22 #include <linux/instrumented.h>
23 
24 void debug_user_asce(int exit);
25 
26 #ifdef CONFIG_KMSAN
27 #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
28 #else
29 #define uaccess_kmsan_or_inline __always_inline
30 #endif
31 
32 #define INLINE_COPY_FROM_USER
33 #define INLINE_COPY_TO_USER
34 
/*
 * raw_copy_from_user() - copy @size bytes from user space @from to kernel
 * space @to via the mvcos (move with optional specifications) instruction.
 * Returns the number of bytes NOT copied (0 on success).
 *
 * mvcos moves at most 4096 bytes per execution, therefore the loop
 * advances both operands in 4K steps until the condition code reports
 * completion. Faults are handled via the EX_TABLE_UA_MVCOS_FROM fixups,
 * which resume after the instruction with @size reflecting the uncopied
 * remainder.
 */
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		/* Remember the requested length before this (up to 4K) chunk. */
		osize = size;
		asm_inline volatile(
			/*
			 * lhi sets the low halfword of r0: the 0x81
			 * specification applies to the second (source)
			 * operand, i.e. the user space access — compare
			 * with the llilh variant in raw_copy_to_user().
			 */
			"	lhi	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_FROM(0b, 0b)
			EX_TABLE_UA_MVCOS_FROM(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char __user *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		/*
		 * For a compile time constant length of at most 4096 bytes a
		 * single mvcos is sufficient: skip the cc test so the
		 * compiler can discard the loop entirely.
		 */
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		/* cc != 0: a full 4K block was moved, more bytes remain. */
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}
62 
/*
 * raw_copy_to_user() - copy @size bytes from kernel space @from to user
 * space @to via mvcos. Returns the number of bytes NOT copied (0 on
 * success). Mirrors raw_copy_from_user(), except that the 0x81
 * specification is applied to the destination operand.
 */
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		/* Remember the requested length before this (up to 4K) chunk. */
		osize = size;
		asm_inline volatile(
			/*
			 * llilh sets the upper halfword of the low 32 bits of
			 * r0: here 0x81 applies to the first (destination)
			 * operand, i.e. the user space access.
			 */
			"	llilh	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)from)
			: CC_CLOBBER_LIST("memory", "0"));
		/* Constant short lengths need exactly one mvcos - drop the loop. */
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (likely(CC_TRANSFORM(cc) == 0))
			return osize - size;
		/* cc != 0: a full 4K block was moved, more bytes remain. */
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}
90 
91 unsigned long __must_check
92 _copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
93 
94 static __always_inline unsigned long __must_check
copy_from_user_key(void * to,const void __user * from,unsigned long n,unsigned long key)95 copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
96 {
97 	if (check_copy_size(to, n, false))
98 		n = _copy_from_user_key(to, from, n, key);
99 	return n;
100 }
101 
102 unsigned long __must_check
103 _copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);
104 
105 static __always_inline unsigned long __must_check
copy_to_user_key(void __user * to,const void * from,unsigned long n,unsigned long key)106 copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
107 {
108 	if (check_copy_size(from, n, true))
109 		n = _copy_to_user_key(to, from, n, key);
110 	return n;
111 }
112 
113 int __noreturn __put_user_bad(void);
114 
115 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
116 
/*
 * DEFINE_PUT_USER_NOINSTR - generate __put_user_<type>_noinstr(), which
 * stores a single value of the given width to user space with mvcos,
 * without any instrumentation hooks (those are added by the
 * __put_user_<type>() wrappers generated further below).
 *
 * asm goto variant: the exception table entries branch straight to the
 * Efault label. Returns 0 on success, -EFAULT if the store faulted.
 */
#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "+Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	return -EFAULT;							\
}
139 
140 #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
141 
/*
 * DEFINE_PUT_USER_NOINSTR - fallback variant for compilers without
 * asm goto output operand support: on success rc is cleared by the
 * "lhi %[rc],0" following the mvcos; on a fault the
 * EX_TABLE_UA_FAULT fixup writes the error code into rc instead.
 */
#define DEFINE_PUT_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__put_user_##type##_noinstr(unsigned type __user *to,			\
			    unsigned type *from,			\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		"	llilh	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "+Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	return rc;							\
}
163 
164 #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
165 
166 DEFINE_PUT_USER_NOINSTR(char);
167 DEFINE_PUT_USER_NOINSTR(short);
168 DEFINE_PUT_USER_NOINSTR(int);
169 DEFINE_PUT_USER_NOINSTR(long);
170 
/*
 * DEFINE_PUT_USER - generate __put_user_<type>(): the _noinstr store
 * plus the instrumentation hook. Note instrument_put_user() runs
 * unconditionally; the fault/success status rc is passed through
 * unchanged.
 */
#define DEFINE_PUT_USER(type)						\
static __always_inline int						\
__put_user_##type(unsigned type __user *to, unsigned type *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __put_user_##type##_noinstr(to, from, size);		\
	instrument_put_user(*from, to, size);				\
	return rc;							\
}
182 
183 DEFINE_PUT_USER(char);
184 DEFINE_PUT_USER(short);
185 DEFINE_PUT_USER(int);
186 DEFINE_PUT_USER(long);
187 
/*
 * __put_user() - store @x to user space address @ptr, without access_ok()
 * or might_fault() checks (see put_user() below for the checked variant).
 * Dispatches on the access size to the generated __put_user_<type>()
 * helpers; any size other than 1/2/4/8 resolves to __put_user_bad(),
 * which is never defined and therefore fails at link time.
 * Evaluates to 0 on success, -EFAULT on fault.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __prc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__prc = __put_user_char((unsigned char __user *)(ptr),	\
					(unsigned char *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	case 2:								\
		__prc = __put_user_short((unsigned short __user *)(ptr),\
					 (unsigned short *)&__x,	\
					 sizeof(*(ptr)));		\
		break;							\
	case 4:								\
		__prc = __put_user_int((unsigned int __user *)(ptr),	\
				       (unsigned int *)&__x,		\
				       sizeof(*(ptr)));			\
		break;							\
	case 8:								\
		__prc = __put_user_long((unsigned long __user *)(ptr),	\
					(unsigned long *)&__x,		\
					sizeof(*(ptr)));		\
		break;							\
	default:							\
		__prc = __put_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__prc, 0);					\
})
221 
/*
 * put_user() - checked variant of __put_user(): adds the might_fault()
 * debug/sleep annotation before performing the store.
 */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})
227 
228 int __noreturn __get_user_bad(void);
229 
230 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
231 
/*
 * DEFINE_GET_USER_NOINSTR - generate __get_user_<type>_noinstr(), which
 * fetches a single value of the given width from user space with mvcos,
 * without instrumentation hooks.
 *
 * asm goto variant; note lhi (not llilh) so the 0x81 specification
 * applies to the source operand. On a fault *to is zeroed and -EFAULT
 * is returned; on success returns 0.
 */
#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	asm goto(							\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	nopr	%%r7\n"					\
		EX_TABLE(0b, %l[Efault])				\
		EX_TABLE(1b, %l[Efault])				\
		: [to] "=Q" (*to)					\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0"						\
		: Efault						\
		);							\
	return 0;							\
Efault:									\
	*to = 0;							\
	return -EFAULT;							\
}
255 
256 #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
257 
/*
 * DEFINE_GET_USER_NOINSTR - fallback variant for compilers without
 * asm goto output operand support: rc is cleared after a successful
 * mvcos, or set by the EX_TABLE_UA_FAULT fixup on a fault, in which
 * case *to is zeroed before returning the error.
 */
#define DEFINE_GET_USER_NOINSTR(type)					\
static uaccess_kmsan_or_inline int					\
__get_user_##type##_noinstr(unsigned type *to,				\
			    const unsigned type __user *from,		\
			    unsigned long size)				\
{									\
	int rc;								\
									\
	asm_inline volatile(						\
		"	lhi	%%r0,%[spec]\n"				\
		"0:	mvcos	%[to],%[from],%[size]\n"		\
		"1:	lhi	%[rc],0\n"				\
		"2:\n"							\
		EX_TABLE_UA_FAULT(0b, 2b, %[rc])			\
		EX_TABLE_UA_FAULT(1b, 2b, %[rc])			\
		: [rc] "=d" (rc), [to] "=Q" (*to)			\
		: [size] "d" (size), [from] "Q" (*from),		\
		  [spec] "I" (0x81)					\
		: "cc", "0");						\
	if (likely(!rc))						\
		return 0;						\
	*to = 0;							\
	return rc;							\
}
282 
283 #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
284 
285 DEFINE_GET_USER_NOINSTR(char);
286 DEFINE_GET_USER_NOINSTR(short);
287 DEFINE_GET_USER_NOINSTR(int);
288 DEFINE_GET_USER_NOINSTR(long);
289 
/*
 * DEFINE_GET_USER - generate __get_user_<type>(): the _noinstr fetch
 * plus the instrumentation hook. instrument_get_user() runs
 * unconditionally (on the fault path *to has been zeroed by the
 * _noinstr helper); rc is passed through unchanged.
 */
#define DEFINE_GET_USER(type)						\
static __always_inline int						\
__get_user_##type(unsigned type *to, const unsigned type __user *from,	\
		  unsigned long size)					\
{									\
	int rc;								\
									\
	rc = __get_user_##type##_noinstr(to, from, size);		\
	instrument_get_user(*to);					\
	return rc;							\
}
301 
302 DEFINE_GET_USER(char);
303 DEFINE_GET_USER(short);
304 DEFINE_GET_USER(int);
305 DEFINE_GET_USER(long);
306 
/*
 * __get_user() - fetch a value from user space address @ptr into @x,
 * without access_ok() or might_fault() checks (see get_user() below).
 * Dispatches on the access size to the generated __get_user_<type>()
 * helpers, fetching into a correctly typed local and then reinterpreting
 * the bits as __typeof__(*(ptr)). Sizes other than 1/2/4/8 resolve to
 * __get_user_bad(), which is never defined - link time error.
 * Evaluates to 0 on success, -EFAULT on fault (in which case @x is 0).
 */
#define __get_user(x, ptr)						\
({									\
	const __user void *____guptr = (ptr);				\
	int __grc;							\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		const unsigned char __user *__guptr = ____guptr;	\
		unsigned char __x;					\
									\
		__grc = __get_user_char(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		const unsigned short __user *__guptr = ____guptr;	\
		unsigned short __x;					\
									\
		__grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		const unsigned int __user *__guptr = ____guptr;		\
		unsigned int __x;					\
									\
		__grc = __get_user_int(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		const unsigned long __user *__guptr = ____guptr;	\
		unsigned long __x;					\
									\
		__grc = __get_user_long(&__x, __guptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__grc = __get_user_bad();				\
		break;							\
	}								\
	__builtin_expect(__grc, 0);					\
})
352 
/*
 * get_user() - checked variant of __get_user(): adds the might_fault()
 * debug/sleep annotation before performing the fetch.
 */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
358 
359 /*
360  * Copy a null terminated string from userspace.
361  */
362 long __must_check strncpy_from_user(char *dst, const char __user *src, long count);
363 
364 long __must_check strnlen_user(const char __user *src, long count);
365 
/*
 * __clear_user() - zero @size bytes of user space at @to by copying from
 * empty_zero_page with mvcos, 4K per iteration (same loop structure as
 * raw_copy_to_user()). Returns the number of bytes NOT cleared (0 on
 * success).
 */
static uaccess_kmsan_or_inline __must_check unsigned long
__clear_user(void __user *to, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		/* Remember the requested length before this (up to 4K) chunk. */
		osize = size;
		asm_inline volatile(
			/* llilh: the 0x81 spec applies to the (user) destination. */
			"	llilh	%%r0,%[spec]\n"
			"0:	mvcos	%[to],%[from],%[size]\n"
			"1:	nopr	%%r7\n"
			CC_IPM(cc)
			EX_TABLE_UA_MVCOS_TO(0b, 0b)
			EX_TABLE_UA_MVCOS_TO(1b, 0b)
			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
			: [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page)
			: CC_CLOBBER_LIST("memory", "0"));
		/* Constant short lengths need exactly one mvcos - drop the loop. */
		if (__builtin_constant_p(osize) && osize <= 4096)
			return osize - size;
		if (CC_TRANSFORM(cc) == 0)
			return osize - size;
		/* cc != 0: a full 4K block was cleared, more bytes remain. */
		size -= 4096;
		to += 4096;
	}
}
392 
/*
 * clear_user() - checked variant of __clear_user(): adds the
 * might_fault() annotation. Returns the number of bytes NOT cleared.
 */
static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
398 
399 void *__s390_kernel_write(void *dst, const void *src, size_t size);
400 
s390_kernel_write(void * dst,const void * src,size_t size)401 static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
402 {
403 	if (__is_defined(__DECOMPRESSOR))
404 		return memcpy(dst, src, size);
405 	return __s390_kernel_write(dst, src, size);
406 }
407 
408 void __noreturn __mvc_kernel_nofault_bad(void);
409 
410 #if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)
411 
/*
 * __mvc_kernel_nofault() - copy sizeof(type) bytes between kernel
 * addresses with a single mvc, branching to @err_label if the access
 * faults. Used as both __get_kernel_nofault and __put_kernel_nofault
 * (see the defines below the #endif).
 *
 * asm goto variant; unsupported sizes resolve to the never-defined
 * __mvc_kernel_nofault_bad() - link time error.
 */
#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm goto(						\
			"0:	mvc	%O[_dst](%[_len],%R[_dst]),%[_src]\n" \
			"1:	nopr	%%r7\n"				\
			EX_TABLE(0b, %l[err_label])			\
			EX_TABLE(1b, %l[err_label])			\
			: [_dst] "=Q" (*(type *)dst)			\
			: [_src] "Q" (*(type *)(src)),			\
			  [_len] "I" (sizeof(type))			\
			:						\
			: err_label);					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)
435 
436 #else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
437 
/*
 * __mvc_kernel_nofault() - fallback variant for compilers lacking
 * asm goto outputs and/or the %O/%R address format flags: the fault
 * status is returned in __rc (cleared on success, set by the
 * EX_TABLE_UA_FAULT fixup on a fault) and tested in C code, which then
 * does the goto to @err_label itself.
 */
#define __mvc_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *(__dst) = (type *)(dst);					\
	int __rc;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		asm_inline volatile(					\
			"0:	mvc	0(%[_len],%[_dst]),%[_src]\n"	\
			"1:	lhi	%[_rc],0\n"			\
			"2:\n"						\
			EX_TABLE_UA_FAULT(0b, 2b, %[_rc])		\
			EX_TABLE_UA_FAULT(1b, 2b, %[_rc])		\
			: [_rc] "=d" (__rc),				\
			  "=m" (*__dst)					\
			: [_src] "Q" (*(type *)(src)),			\
			[_dst] "a" (__dst),				\
			[_len] "I" (sizeof(type)));			\
		if (__rc)						\
			goto err_label;					\
		break;							\
	default:							\
		__mvc_kernel_nofault_bad();				\
		break;							\
	}								\
} while (0)
467 
468 #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
469 
470 #define __get_kernel_nofault __mvc_kernel_nofault
471 #define __put_kernel_nofault __mvc_kernel_nofault
472 
473 void __cmpxchg_user_key_called_with_bad_pointer(void);
474 
475 #define CMPXCHG_USER_KEY_MAX_LOOPS 128
476 
/*
 * __cmpxchg_user_key() - cmpxchg on a user space target, honoring the
 * storage key @key. Do not call directly; use the cmpxchg_user_key()
 * wrapper, whose kernel-doc below documents the contract.
 *
 * All cases switch to the secondary address space (sacf 256) and set the
 * PSW key (spka) before touching *@address, and restore the default key
 * and address space mode afterwards; the exception fixup targets are the
 * restore sequences, so they also run on the fault paths.
 *
 * There is no 1- or 2-byte compare-and-swap instruction, therefore those
 * cases emulate the operation with a cs loop on the surrounding aligned
 * 4-byte word, keeping the bytes outside the target unchanged. The loop
 * is bounded by CMPXCHG_USER_KEY_MAX_LOOPS to guarantee forward
 * progress; -EAGAIN is returned when the bound is hit.
 */
static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
					      __uint128_t old, __uint128_t new,
					      unsigned long key, int size)
{
	int rc = 0;

	switch (size) {
	case 1: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		/*
		 * Bit shift of the target byte within its aligned word
		 * (big endian: byte offset 0 is the most significant);
		 * clear the low address bits to get the aligned word.
		 */
		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		_old = ((unsigned int)old & 0xff) << shift;
		_new = ((unsigned int)new & 0xff) << shift;
		mask = ~(0xff << shift);
		/*
		 * NOTE(review): [key] uses a matching constraint on
		 * [count] - key << 4 is consumed by spka before llill
		 * reloads the same register with the loop count.
		 * Presumably intentional register reuse; confirm against
		 * gcc named matching constraint semantics before
		 * touching the operand lists.
		 */
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		/* Hand the old byte value back to the caller. */
		*(unsigned char *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 2: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		/* Same emulation as case 1, for an aligned halfword. */
		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		_old = ((unsigned int)old & 0xffff) << shift;
		_new = ((unsigned int)new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		/* Hand the old halfword value back to the caller. */
		*(unsigned short *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 4:	{
		unsigned int prev = old;

		/* Native 4-byte compare-and-swap. */
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cs	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" ((unsigned int)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned int *)uval = prev;
		return rc;
	}
	case 8: {
		unsigned long prev = old;

		/* Native 8-byte compare-and-swap. */
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	csg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" ((unsigned long)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned long *)uval = prev;
		return rc;
	}
	case 16: {
		__uint128_t prev = old;

		/* Native 16-byte compare-and-swap (register pairs). */
		asm_inline volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cdsg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(__int128_t *)address)
			: [new] "d" (new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(__uint128_t *)uval = prev;
		return rc;
	}
	}
	/*
	 * Unsupported size: the called function is declared but never
	 * defined, turning a bad size into a link time error.
	 */
	__cmpxchg_user_key_called_with_bad_pointer();
	return rc;
}
647 
648 /**
649  * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
650  * @ptr: User space address of value to compare to @old and exchange with
651  *	 @new. Must be aligned to sizeof(*@ptr).
652  * @uval: Address where the old value of *@ptr is written to.
653  * @old: Old value. Compared to the content pointed to by @ptr in order to
654  *	 determine if the exchange occurs. The old value read from *@ptr is
655  *	 written to *@uval.
656  * @new: New value to place at *@ptr.
657  * @key: Access key to use for checking storage key protection.
658  *
659  * Perform a cmpxchg on a user space target, honoring storage key protection.
660  * @key alone determines how key checking is performed, neither
661  * storage-protection-override nor fetch-protection-override apply.
662  * The caller must compare *@uval and @old to determine if values have been
663  * exchanged. In case of an exception *@uval is set to zero.
664  *
665  * Return:     0: cmpxchg executed
666  *	       -EFAULT: an exception happened when trying to access *@ptr
667  *	       -EAGAIN: maxed out number of retries (byte and short only)
668  */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	/* Evaluate ptr and uval exactly once. */			\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	/* The old-value buffer must match the width of the target. */	\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
680 
681 #endif /* __S390_UACCESS_H */
682