/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/ucopysize.h>

#include <asm/uaccess.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 *
 * Passing down mm_struct allows defining untagging rules on a per-process
 * basis.
 *
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr) ({	\
	mmap_assert_locked(mm);			\
	untagged_addr(addr);			\
})
#endif

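/*
 * Illustrative sketch (not taken from any particular architecture): code
 * that range-checks a possibly-tagged user pointer should compare the
 * untagged form, e.g.:
 *
 *	if (untagged_addr(uaddr) >= TASK_SIZE)
 *		return -EFAULT;
 *
 * On architectures without memory tagging this is a plain comparison of
 * uaddr itself.
 */
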
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
#define mask_user_address(src) (src)
#endif

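/*
 * Sketch of how the masked-access helpers are meant to be combined with
 * the user_access machinery further down in this header (illustrative
 * only; real call sites vary):
 *
 *	if (can_do_masked_user_access())
 *		from = masked_user_access_begin(from);
 *	else if (!user_read_access_begin(from, sizeof(*from)))
 *		return -EFAULT;
 *	unsafe_get_user(val, from, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 *
 * Masking redirects an invalid pointer to a guaranteed-faulting address,
 * so no access_ok() check (and no speculation barrier) is needed.
 */
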
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen. Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, the __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

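/*
 * For example, a caller of the non-zero-padding variants must fold the
 * residue into an error itself:
 *
 *	if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;	/* karg may be partially written *​/
 */
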
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the copy does not fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

/*
 * Architectures that #define INLINE_COPY_TO_USER use this function
 * directly in the normal copy_to/from_user(), the other ones go
 * through an extern _copy_to/from_user(), which expands the same code
 * here.
 *
 * Rust code always uses the extern definition.
 */
static inline __must_check unsigned long
_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (should_fail_usercopy())
		goto fail;
	if (can_do_masked_user_access())
		from = mask_user_address(from);
	else {
		if (!access_ok(from, n))
			goto fail;
		/*
		 * Ensure that bad access_ok() speculation will not
		 * lead to nasty side effects *after* the copy is
		 * finished:
		 */
		barrier_nospec();
	}
	instrument_copy_from_user_before(to, from, n);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	if (likely(!res))
		return 0;
fail:
	memset(to + (n - res), 0, res);
	return res;
}
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);

static inline __must_check unsigned long
_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (!check_copy_size(to, n, false))
		return n;
#ifdef INLINE_COPY_FROM_USER
	return _inline_copy_from_user(to, from, n);
#else
	return _copy_from_user(to, from, n);
#endif
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (!check_copy_size(from, n, true))
		return n;

#ifdef INLINE_COPY_TO_USER
	return _inline_copy_to_user(to, from, n);
#else
	return _copy_to_user(to, from, n);
#endif
}

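/*
 * Typical call site (sketch): any non-zero residue is reported to
 * userspace as -EFAULT, since the byte count itself is rarely useful:
 *
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
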
#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

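/*
 * Illustrative use (note that the return value is the residue count, not
 * an error code; -EIO here is just one plausible mapping):
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EIO;	/* len - rem bytes were copied *​/
 */
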
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

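/*
 * Sketch of the usual pairing with the _inatomic copy variants: with
 * pagefaults disabled, a fault is reported as residue rather than being
 * handled (possibly sleeping) by the fault handler:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, src, size);
 *	pagefault_enable();
 *	if (left)
 *		// fall back to a sleeping copy, or fail
 */
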
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is a no-op, so the handler would
 * not actually be disabled, and in_atomic() reports different values
 * depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable())

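/*
 * The guard defined above enables scoped pagefault disabling (a sketch;
 * the counter is dropped automatically when the scope is left):
 *
 *	{
 *		guard(pagefault)();
 *		left = __copy_from_user_inatomic(dst, src, size);
 *	}
 */
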
#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

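/*
 * Illustrative use, assuming write access to the range was already
 * established page-by-page (e.g. via GUP): a non-zero return means a
 * sub-page fault (such as an arm64 MTE tag check fault) remains:
 *
 *	if (probe_subpage_writeable(uaddr, size))
 *		return -EFAULT;
 */
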
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst: Destination address, in kernel space. This buffer must be @ksize
 *	bytes long.
 * @ksize: Size of @dst struct.
 * @src: Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

/**
 * copy_struct_to_user: copy a struct to userspace
 * @dst: Destination address, in userspace. This buffer must be @ksize
 *	bytes long.
 * @usize: (Alleged) size of @dst struct.
 * @src: Source address, in kernel space.
 * @ksize: Size of @src struct.
 * @ignored_trailing: Set to %true if there was a non-zero byte in @src that
 * userspace cannot see because they are using a smaller struct.
 *
 * Copies a struct from kernel space to userspace, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * Some syscalls may wish to make sure that userspace knows about everything in
 * the struct, and if there is a non-zero value that userspace doesn't know
 * about, they want to return an error (such as -EMSGSIZE) or have some other
 * fallback (such as adding a "you're missing some information" flag). If
 * @ignored_trailing is non-%NULL, it will be set to %true if there was a
 * non-zero byte that could not be copied to userspace (ie. was past @usize).
 *
 * While unconditionally returning an error in this case is the simplest
 * solution, for maximum backward compatibility you should try to only return
 * -EMSGSIZE if the user explicitly requested the data that couldn't be copied.
 * Note that structure sizes can change due to header changes and simple
 * recompilations without code changes(!), so if you care about
 * @ignored_trailing you probably want to make sure that any new field data is
 * associated with a flag. Otherwise you might assume that a program knows
 * about data it does not.
 *
 * @ksize is just sizeof(*src), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      bool ignored_trailing;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      // ... modify karg somehow ...
 *
 *      err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg),
 *                                &ignored_trailing);
 *      if (err)
 *        return err;
 *      if (ignored_trailing)
 *        return -EMSGSIZE;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the kernel is trying to pass userspace a newer
 *    struct than it supports. Thus we only copy the interoperable portions
 *    (@usize) and ignore the rest (but @ignored_trailing is set to %true if
 *    any of the trailing (@ksize - @usize) bytes are non-zero).
 *  * If @usize > @ksize, then the kernel is trying to pass userspace an older
 *    struct than userspace supports. In order to make sure the
 *    unknown-to-the-kernel fields don't contain garbage values, we zero the
 *    trailing (@usize - @ksize) bytes.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_to_user(void __user *dst, size_t usize, const void *src,
		    size_t ksize, bool *ignored_trailing)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize > ksize) {
		if (clear_user(dst + size, rest))
			return -EFAULT;
	}
	if (ignored_trailing)
		*ignored_trailing = usize < ksize &&
			memchr_inv(src + size, 0, rest) != NULL;
	/* Copy the interoperable parts of the struct. */
	if (copy_to_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

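/*
 * The _nofault variants never fault pages in and may be used from
 * contexts where sleeping is not allowed (e.g. tracing); a sketch:
 *
 *	unsigned long word;
 *
 *	if (copy_from_kernel_nofault(&word, ptr, sizeof(word)))
 *		return -EFAULT;
 */
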
#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

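/*
 * Example (ip is a hypothetical, possibly unmapped, code address):
 *
 *	u32 insn;
 *
 *	if (get_kernel_nofault(insn, (u32 *)ip))
 *		return -EFAULT;
 */
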
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

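/*
 * Canonical shape of a user_access window (sketch): faults inside the
 * window branch to the label passed to the unsafe_ accessors, and the
 * window must be closed on both paths:
 *
 *	if (!user_write_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_write_access_end();
 *	return 0;
 * Efault:
 *	user_write_access_end();
 *	return -EFAULT;
 */
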
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */