/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
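
/*
 * Usage sketch only (not an interface defined here): the classic pattern
 * for temporarily lifting the address limit so the user accessors accept
 * kernel pointers.  The buffer touched in the middle is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...access a kernel buffer through the user accessors...
 *	set_fs(old_fs);
 */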

#define segment_eq(a, b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * check that a range of addresses falls within the current address limit;
 * returns zero if the range is acceptable and non-zero if it is not
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n"	/* jump if addr+size>limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
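
/*
 * Equivalence sketch (hypothetical caller): verify_area() is the older
 * error-code form of the same range test; access_ok() is the boolean
 * form used by newer code.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, len))
 *		return -EFAULT;
 *	which behaves like: if (verify_area(VERIFY_WRITE, uptr, len)) ...
 */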


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no fixup entry is found and non-zero otherwise.  */
extern int fixup_exception(struct pt_regs *regs);
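
/*
 * Sketch of the assumed consumer (the fault handler lives elsewhere):
 * on a kernel-mode fault, fixup_exception() looks the faulting PC up in
 * __ex_table and, if an entry exists, redirects execution to the fixup
 * stub instead of oopsing.
 *
 *	if (fixup_exception(regs))
 *		return;			resume at the fixup address
 *	...otherwise handle it as a genuine kernel fault...
 */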

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
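
/*
 * Intended-usage sketch (struct and names hypothetical): check the whole
 * range once with access_ok(), then use the unchecked accessors for the
 * individual fields.
 *
 *	struct foo __user *ufoo = ptr;
 *	int a, b;
 *	if (!access_ok(VERIFY_READ, ufoo, sizeof(*ufoo)))
 *		return -EFAULT;
 *	if (__get_user(a, &ufoo->a) || __get_user(b, &ufoo->b))
 *		return -EFAULT;
 */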

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x, ptr, ret) \
	({ if (put_user((x), (ptr)))	return (ret); })
#define get_user_ret(x, ptr, ret) \
	({ if (get_user((x), (ptr)))	return (ret); })
#define __put_user_ret(x, ptr, ret) \
	({ if (__put_user((x), (ptr)))	return (ret); })
#define __get_user_ret(x, ptr, ret) \
	({ if (__get_user((x), (ptr)))	return (ret); })
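
/*
 * Usage sketch (hypothetical function): the _ret forms fold the fault
 * check and early return into a single statement; on a faulting store
 * the enclosing function returns -EFAULT.
 *
 *	long example(int __user *uptr)
 *	{
 *		put_user_ret(42, uptr, -EFAULT);
 *		return 0;
 *	}
 */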

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)				\
({									\
	unsigned long __gu_addr;					\
	int __gu_err;							\
	__gu_addr = (unsigned long) (ptr);				\
	switch (size) {							\
	case 1: {							\
		unsigned char __gu_val;					\
		__get_user_asm("bu");					\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;		\
		break;							\
	}								\
	case 2: {							\
		unsigned short __gu_val;				\
		__get_user_asm("hu");					\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;		\
		break;							\
	}								\
	case 4: {							\
		unsigned int __gu_val;					\
		__get_user_asm("");					\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;		\
		break;							\
	}								\
	default:							\
		__get_user_unknown();					\
		break;							\
	}								\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	const __typeof__(ptr) __guc_ptr = (ptr);			\
	int _e;								\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size))))	\
		_e = __get_user_nocheck((x), __guc_ptr, (size));	\
	else {								\
		_e = -EFAULT;						\
		(x) = (__typeof__(x))0;					\
	}								\
	_e;								\
})

#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	union {								\
		__typeof__(*(ptr)) val;					\
		u32 bits[2];						\
	} __pu_val;							\
	unsigned long __pu_addr;					\
	int __pu_err;							\
	__pu_val.val = (x);						\
	__pu_addr = (unsigned long) (ptr);				\
	if (likely(__access_ok(__pu_addr, size))) {			\
		switch (size) {						\
		case 1:  __put_user_asm("bu"); break;			\
		case 2:  __put_user_asm("hu"); break;			\
		case 4:  __put_user_asm(""  ); break;			\
		case 8:  __put_user_asm8();    break;			\
		default: __pu_err = __put_user_unknown(); break;	\
		}							\
	} else {							\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})

#define __put_user_asm8()						\
({									\
	asm volatile(							\
		"1:	mov		%1,%3		\n"		\
		"2:	mov		%2,%4		\n"		\
		"	mov		0,%0		\n"		\
		"3:					\n"		\
		"	.section	.fixup,\"ax\"	\n"		\
		"4:					\n"		\
		"	mov		%5,%0		\n"		\
		"	jmp		3b		\n"		\
		"	.previous			\n"		\
		"	.section	__ex_table,\"a\"\n"		\
		"	.balign		4		\n"		\
		"	.long		1b, 4b		\n"		\
		"	.long		2b, 4b		\n"		\
		"	.previous			\n"		\
		: "=&r" (__pu_err)					\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),	\
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),		\
		  "i" (-EFAULT)						\
		);							\
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)					\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:     movbu	(%0),%3;\n"			\
			"1:     movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"       add	-1,%2;\n"			\
			"       bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"       .balign	4\n"				\
			"       .long	0b,3b\n"			\
			"       .long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)

/* As __copy_user(), but zero-fill the remaining bytes of the destination
 * if the source faults partway through.
 */
#define __copy_user_zeroing(to, from, size)				\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:     movbu	(%0),%3;\n"			\
			"1:     movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"       add	-1,%2;\n"			\
			"       bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:\n"						\
			"	mov	%2,%0\n"			\
			"	clr	%3\n"				\
			"4:     movbu	%3,(%1);\n"			\
			"	inc	%1;\n"				\
			"       add	-1,%2;\n"			\
			"       bne	4b;\n"				\
			"	mov	%0,%2\n"			\
			"	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"       .balign	4\n"				\
			"       .long	0b,3b\n"			\
			"       .long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
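
/*
 * Return-value sketch: both helpers return the number of bytes that could
 * NOT be copied, so zero means complete success.  On a faulting read,
 * __copy_user_zeroing() also zero-fills the untouched tail of the kernel
 * buffer so the caller never sees stale kernel memory.
 *
 *	if (__generic_copy_from_user_nocheck(kbuf, ubuf, len))
 *		return -EFAULT;		(hypothetical caller, range pre-checked)
 */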


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"       mov %0,a0;\n"		\
		"0:     movbu (%1),d3;\n"	\
		"1:     movbu d3,(%2);\n"	\
		"       add -1,a0;\n"		\
		"       bne 0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp 2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"       .balign 4\n"		\
		"       .long 0b,3b\n"		\
		"       .long 1b,3b\n"		\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"       mov %0,a0;\n"			\
		"0:     movbu (%1),d3;\n"		\
		"1:     movbu d3,(%2);\n"		\
		"       add -1,a0;\n"			\
		"       bne 0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp 2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"       .balign 4\n"			\
		"       .long 0b,3b\n"			\
		"       .long 1b,3b\n"			\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
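
/*
 * Call-pattern sketch (the surrounding read-style handler is hypothetical):
 * the checked copy_to_user() performs the access_ok() test itself, so no
 * separate range check is needed.
 *
 *	ssize_t foo_read(char __user *buf, size_t len)
 *	{
 *		if (copy_to_user(buf, kernel_data, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */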

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
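
/*
 * String-helper sketch (hypothetical caller): strncpy_from_user() returns
 * the length of the copied string on success or -EFAULT on a bad address;
 * clear_user() returns the number of bytes that could not be zeroed.
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *	if (n < 0)
 *		return -EFAULT;
 */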

#endif /* _ASM_UACCESS_H */