/* uaccess.h: userspace accessor functions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>

#define HAVE_ARCH_UNMAPPED_AREA	/* we decide where to put mmaps */

#define __ptr(x) ((unsigned long __force *)(x))

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * Check that a range of addresses falls within the current address limit.
 * Returns 0 if the range is acceptable and -EFAULT otherwise.
 */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_MMU
	int flag = -EFAULT, tmp;

	asm volatile (
		"	addcc	%3,%2,%1,icc0	\n"	/* set C-flag if addr+size>4GB */
		"	subcc.p	%1,%4,gr0,icc1	\n"	/* jump if addr+size>limit */
		"	bc	icc0,#0,0f	\n"
		"	bhi	icc1,#0,0f	\n"
		"	setlos	#0,%0		\n"	/* mark okay */
		"0:				\n"
		: "=r"(flag), "=&r"(tmp)
		: "r"(addr), "r"(size), "r"(get_addr_limit()), "0"(flag)
		);

	return flag;

#else

	if (addr < memory_start ||
	    addr > memory_end ||
	    size > memory_end - memory_start ||
	    addr + size > memory_end)
		return -EFAULT;

	return 0;
#endif
}

#define __range_ok(addr,size) ___range_ok((unsigned long) (addr), (unsigned long) (size))

#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
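
/*
 * Example (illustrative only, not part of this header): a syscall might
 * validate a user pointer before dereferencing it; "arg" and "val" are
 * hypothetical names.
 *
 *	int __user *arg = ...;
 *	int val;
 *
 *	if (!access_ok(VERIFY_READ, arg, sizeof(*arg)))
 *		return -EFAULT;
 *	if (__get_user(val, arg))
 *		return -EFAULT;
 */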

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if the exception is not found, and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);
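
/*
 * Sketch (an assumption about the fault path, not code from this file) of
 * how the fault handler is expected to consult the table; the "regs->pc"
 * field name is illustrative:
 *
 *	unsigned long fixup = search_exception_table(regs->pc);
 *	if (fixup) {
 *		regs->pc = fixup;	(resume at the out-of-line fixup stub)
 *		return;
 *	}
 *	(no fixup: a genuine kernel fault)
 */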

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
									\
	typeof(*(ptr)) __pu_val = (x);					\
	__chk_user_ptr(ptr);						\
									\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "b", "r");	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "h", "r");	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "",  "r");	\
		break;							\
	case 8:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "d", "e");	\
		break;							\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})

#define put_user(x, ptr)			\
({						\
	typeof(*(ptr)) __user *_p = (ptr);	\
	int _e;					\
						\
	_e = __range_ok(_p, sizeof(*_p));	\
	if (_e == 0)				\
		_e = __put_user((x), _p);	\
	_e;					\
})

extern int __put_user_bad(void);
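
/*
 * Example (illustrative only): returning a value to userspace; "uptr" is a
 * hypothetical tagged user pointer.
 *
 *	int __user *uptr = ...;
 *	int err;
 *
 *	err = put_user(42, uptr);	(0 on success, -EFAULT on failure)
 */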

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

#ifdef CONFIG_MMU

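/*
 * The store at label "1:" below is allowed to fault.  Its fixup at label
 * "3:", placed out of line in subsection 2, loads -EFAULT into the error
 * operand and branches back to "2:".  The __ex_table entry pairs the
 * faulting address (1b) with the fixup address (3b) so that the fault
 * handler can redirect execution there.
 */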
#define __put_user_asm(err,x,ptr,dsize,constraint)					\
do {											\
	asm volatile("1:	st"dsize"%I1	%2,%M1	\n"				\
		     "2:				\n"				\
		     ".subsection 2			\n"				\
		     "3:	setlos		%3,%0	\n"				\
		     "		bra		2b	\n"				\
		     ".previous				\n"				\
		     ".section __ex_table,\"a\"		\n"				\
		     "		.balign		8	\n"				\
		     "		.long		1b,3b	\n"				\
		     ".previous"							\
		     : "=r" (err)							\
		     : "m" (*__ptr(ptr)), constraint (x), "i"(-EFAULT), "0"(err)	\
		     : "memory");							\
} while (0)

#else

#define __put_user_asm(err,x,ptr,bwl,con)	\
do {						\
	asm("	st"bwl"%I0	%1,%M0	\n"	\
	    "	membar			\n"	\
	    :					\
	    : "m" (*__ptr(ptr)), con (x)	\
	    : "memory");			\
} while (0)

#endif

/*****************************************************************************/
/*
 * Single-value transfer from userspace.
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
									\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __gu_val;					\
		__get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 2: {							\
		unsigned short __gu_val;				\
		__get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 4: {							\
		unsigned int __gu_val;					\
		__get_user_asm(__gu_err, __gu_val, ptr, "", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 8: {							\
		unsigned long long __gu_val;				\
		__get_user_asm(__gu_err, __gu_val, ptr, "d", "=e");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})

#define get_user(x, ptr)			\
({						\
	const typeof(*(ptr)) __user *_p = (ptr);\
	int _e;					\
						\
	_e = __range_ok(_p, sizeof(*_p));	\
	if (likely(_e == 0))			\
		_e = __get_user((x), _p);	\
	else					\
		(x) = (typeof(x)) 0;		\
	_e;					\
})

extern int __get_user_bad(void);
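
/*
 * Example (illustrative only): fetching a value from userspace; "uptr" is
 * a hypothetical tagged user pointer.  get_user() returns 0 on success;
 * if the address check fails it returns -EFAULT and zeroes "val".
 *
 *	int val;
 *	int err = get_user(val, uptr);
 */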

#ifdef CONFIG_MMU

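/*
 * As with __put_user_asm: the load at "1:" may fault.  The out-of-line
 * fixup at "3:" sets the error operand to -EFAULT, zeroes the destination
 * register, and resumes at "2:"; the __ex_table entry pairs 1b with 3b.
 */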
#define __get_user_asm(err,x,ptr,dtype,constraint)	\
do {							\
	asm("1:		ld"dtype"%I2	%M2,%1	\n"	\
	    "2:					\n"	\
	    ".subsection 2			\n"	\
	    "3:		setlos		%3,%0	\n"	\
	    "		setlos		#0,%1	\n"	\
	    "		bra		2b	\n"	\
	    ".previous				\n"	\
	    ".section __ex_table,\"a\"		\n"	\
	    "		.balign		8	\n"	\
	    "		.long		1b,3b	\n"	\
	    ".previous"					\
	    : "=r" (err), constraint (x)		\
	    : "m" (*__ptr(ptr)), "i"(-EFAULT), "0"(err)	\
	    );						\
} while (0)

#else

#define __get_user_asm(err,x,ptr,bwl,con)	\
	asm("	ld"bwl"%I1	%M1,%0	\n"	\
	    "	membar			\n"	\
	    : con(x)				\
	    : "m" (*__ptr(ptr)))

#endif

/*****************************************************************************/
/*
 * Block clearing and copying of userspace memory.
 */
#define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);

#define clear_user(dst,count)			__memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))

#else

#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)

#endif

#define __clear_user clear_user

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, n);
}

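/*
 * copy_from_user() returns the number of bytes that could not be copied.
 * Any uncopied tail of the kernel buffer is zero-filled below so that a
 * partial copy never leaves stale kernel data visible to the caller.
 */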
static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret = n;

	if (likely(__access_ok(from, n)))
		ret = __copy_from_user(to, from, n);

	if (unlikely(ret != 0))
		memset(to + (n - ret), 0, ret);

	return ret;
}

static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
}

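/*
 * Example (illustrative only): pulling a structure in from userspace; the
 * struct and variable names are hypothetical.
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)) != 0)
 *		return -EFAULT;
 */
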
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)
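
/*
 * Example (illustrative only, assuming the usual kernel semantics): a
 * negative return indicates a fault; "ustr" is a hypothetical user pointer.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, ustr, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */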

#endif /* _ASM_UACCESS_H */