// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/read_write.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= filemap_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

static inline bool unsigned_offsets(struct file *file)
{
	return file->f_op->fop_flags & FOP_UNSIGNED_OFFSET;
}

/**
 * vfs_setpos_cookie - update the file offset for lseek and reset cookie
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 * @cookie: cookie to reset
 *
 * Update the file offset to the value specified by @offset if the given
 * offset is valid and it is not equal to the current file offset and
 * reset the specified cookie to indicate that a seek happened.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
static loff_t vfs_setpos_cookie(struct file *file, loff_t offset,
				loff_t maxsize, u64 *cookie)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (cookie)
			*cookie = 0;
	}
	return offset;
}

/**
 * vfs_setpos - update the file offset for lseek
 * @file: file structure in question
 * @offset: file offset to seek to
 * @maxsize: maximum file size
 *
 * This is a low-level filesystem helper for updating the file offset to
 * the value specified by @offset if the given offset is valid and it is
 * not equal to the current file offset.
 *
 * Return the specified offset on success and -EINVAL on invalid offset.
 */
loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	return vfs_setpos_cookie(file, offset, maxsize, NULL);
}
EXPORT_SYMBOL(vfs_setpos);
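
/*
 * For example, a filesystem with a custom ->llseek could validate and
 * commit the new position with vfs_setpos(). The helper below is a
 * hypothetical sketch (myfs_llseek and MYFS_MAX_FILESIZE are made-up names):
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		if (whence != SEEK_SET)
 *			return -EINVAL;
 *		return vfs_setpos(file, offset, MYFS_MAX_FILESIZE);
 *	}
 */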

/**
 * must_set_pos - check whether f_pos has to be updated
 * @file: file to seek on
 * @offset: offset to use
 * @whence: type of seek operation
 * @eof: end of file
 *
 * Check whether f_pos needs to be updated and update @offset according
 * to @whence.
 *
 * Return: 0 if f_pos doesn't need to be updated, 1 if f_pos has to be
 * updated, and negative error code on failure.
 */
static int must_set_pos(struct file *file, loff_t *offset, int whence, loff_t eof)
{
	switch (whence) {
	case SEEK_END:
		*offset += eof;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation. Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (*offset == 0) {
			*offset = file->f_pos;
			return 0;
		}
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as long as
		 * offset isn't at the end of the file then the offset is data.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so as long as
		 * offset isn't i_size or larger, return i_size.
		 */
		if ((unsigned long long)*offset >= eof)
			return -ENXIO;
		*offset = eof;
		break;
	}

	return 1;
}

/**
 * generic_file_llseek_size - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @maxsize: max size of this file in file system
 * @eof: offset used for SEEK_END position
 *
 * This is a variant of generic_file_llseek that allows passing in a custom
 * maximum file size and a custom EOF position, for e.g. hashed directories
 *
 * Synchronization:
 * SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
 * SEEK_CUR is synchronized against other SEEK_CURs, but not read/writes.
 * read/writes behave like SEEK_SET against seeks.
 */
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int whence,
			 loff_t maxsize, loff_t eof)
{
	int ret;

	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	if (whence == SEEK_CUR) {
		/*
		 * If the file requires locking via f_pos_lock we know
		 * that mutual exclusion for SEEK_CUR on the same file
		 * is guaranteed. If the file isn't locked, we take
		 * f_lock to protect against f_pos races with other
		 * SEEK_CURs.
		 */
		if (file_seek_cur_needs_f_lock(file)) {
			guard(spinlock)(&file->f_lock);
			return vfs_setpos(file, file->f_pos + offset, maxsize);
		}
		return vfs_setpos(file, file->f_pos + offset, maxsize);
	}

	return vfs_setpos(file, offset, maxsize);
}
EXPORT_SYMBOL(generic_file_llseek_size);
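
/*
 * For example, a filesystem whose directory f_pos is a hash cookie rather
 * than a byte offset could seek with a custom maximum and a synthetic EOF,
 * roughly in the spirit of hashed-directory filesystems. A hypothetical
 * sketch (myfs_dir_llseek and MYFS_MAX_HASH are made-up names):
 *
 *	static loff_t myfs_dir_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		return generic_file_llseek_size(file, offset, whence, MYFS_MAX_HASH,
 *						i_size_read(file_inode(file)));
 *	}
 */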

/**
 * generic_llseek_cookie - versioned llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @cookie: cookie to update
 *
 * See generic_file_llseek for a general description and locking assumptions.
 *
 * In contrast to generic_file_llseek, this function also resets a
 * specified cookie to indicate a seek took place.
 */
loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
			     u64 *cookie)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxsize = inode->i_sb->s_maxbytes;
	loff_t eof = i_size_read(inode);
	int ret;

	if (WARN_ON_ONCE(!cookie))
		return -EINVAL;

	/*
	 * Require that this is only used for directories that guarantee
	 * synchronization between readdir and seek so that an update to
	 * @cookie is correctly synchronized with concurrent readdir.
	 */
	if (WARN_ON_ONCE(!(file->f_mode & FMODE_ATOMIC_POS)))
		return -EINVAL;

	ret = must_set_pos(file, &offset, whence, eof);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return offset;

	/* No need to hold f_lock because we know that f_pos_lock is held. */
	if (whence == SEEK_CUR)
		return vfs_setpos_cookie(file, file->f_pos + offset, maxsize, cookie);

	return vfs_setpos_cookie(file, offset, maxsize, cookie);
}
EXPORT_SYMBOL(generic_llseek_cookie);

/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems. It just updates the file offset to the value specified by
 * @offset and @whence.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	return generic_file_llseek_size(file, offset, whence,
					inode->i_sb->s_maxbytes,
					i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);

/**
 * fixed_size_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: size of the file
 *
 */
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR: case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						size, size);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(fixed_size_llseek);
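
/*
 * For example, a character device exposing a fixed 4 KiB register window
 * could use fixed_size_llseek() directly from its ->llseek hook. A
 * hypothetical sketch (mydev_llseek is a made-up name):
 *
 *	static loff_t mydev_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		return fixed_size_llseek(file, offset, whence, SZ_4K);
 *	}
 */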

/**
 * no_seek_end_llseek - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 */
loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR:
		return generic_file_llseek_size(file, offset, whence,
						OFFSET_MAX, 0);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(no_seek_end_llseek);

/**
 * no_seek_end_llseek_size - llseek implementation for fixed-sized devices
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 * @size: maximal offset allowed
 *
 */
loff_t no_seek_end_llseek_size(struct file *file, loff_t offset, int whence, loff_t size)
{
	switch (whence) {
	case SEEK_SET: case SEEK_CUR:
		return generic_file_llseek_size(file, offset, whence,
						size, 0);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(no_seek_end_llseek_size);

/**
 * noop_llseek - No Operation Performed llseek implementation
 * @file: file structure to seek on
 * @offset: file offset to seek to
 * @whence: type of seek
 *
 * This is an implementation of ->llseek useable for the rare special case when
 * userspace expects the seek to succeed but the (device) file is actually not
 * able to perform the seek. In this case you use noop_llseek() instead of
 * falling back to the default implementation of ->llseek.
 */
loff_t noop_llseek(struct file *file, loff_t offset, int whence)
{
	return file->f_pos;
}
EXPORT_SYMBOL(noop_llseek);
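
/*
 * For example, a driver whose device cannot seek but whose users still
 * expect lseek() to "succeed" might wire it up like this (hypothetical
 * file_operations, shown only as a sketch):
 *
 *	static const struct file_operations mydev_fops = {
 *		.llseek	= noop_llseek,
 *		.read	= mydev_read,
 *		.write	= mydev_write,
 *	};
 */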

loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);
	loff_t retval;

	inode_lock(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			retval = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		/*
		 * In the generic case the entire file is data, so as
		 * long as offset isn't at the end of the file then the
		 * offset is data.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		/*
		 * There is a virtual hole at the end of the file, so
		 * as long as offset isn't i_size or larger, return
		 * i_size.
		 */
		if (offset >= inode->i_size) {
			retval = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}
	retval = -EINVAL;
	if (offset >= 0 || unsigned_offsets(file)) {
		if (offset != file->f_pos)
			file->f_pos = offset;
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
{
	if (!(file->f_mode & FMODE_LSEEK))
		return -ESPIPE;
	return file->f_op->llseek(file, offset, whence);
}
EXPORT_SYMBOL(vfs_llseek);

static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence)
{
	off_t retval;
	CLASS(fd_pos, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	retval = -EINVAL;
	if (whence <= SEEK_MAX) {
		loff_t res = vfs_llseek(fd_file(f), offset, whence);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	return retval;
}

SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
{
	return ksys_lseek(fd, offset, whence);
}
#endif

#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
	defined(__ARCH_WANT_SYS_LLSEEK)
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
		unsigned long, offset_low, loff_t __user *, result,
		unsigned int, whence)
{
	int retval;
	CLASS(fd_pos, f)(fd);
	loff_t offset;

	if (fd_empty(f))
		return -EBADF;

	if (whence > SEEK_MAX)
		return -EINVAL;

	offset = vfs_llseek(fd_file(f), ((loff_t) offset_high << 32) | offset_low,
			whence);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
	return retval;
}
#endif

int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count)
{
	int mask = read_write == READ ? MAY_READ : MAY_WRITE;
	int ret;

	if (unlikely((ssize_t) count < 0))
		return -EINVAL;

	if (ppos) {
		loff_t pos = *ppos;

		if (unlikely(pos < 0)) {
			if (!unsigned_offsets(file))
				return -EINVAL;
			if (count >= -pos) /* both values are in 0..LLONG_MAX */
				return -EOVERFLOW;
		} else if (unlikely((loff_t) (pos + count) < 0)) {
			if (!unsigned_offsets(file))
				return -EINVAL;
		}
	}

	ret = security_file_permission(file, mask);
	if (ret)
		return ret;

	return fsnotify_file_area_perm(file, mask, ppos, count);
}
EXPORT_SYMBOL(rw_verify_area);

static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_DEST, buf, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

static int warn_unsupported(struct file *file, const char *op)
{
	pr_warn_ratelimited(
		"kernel %s not supported for file %pD4 (pid: %d comm: %.20s)\n",
		op, file, current->pid, current->comm);
	return -EINVAL;
}

ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
	struct kvec iov = {
		.iov_base	= buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
		return -EINVAL;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	/*
	 * Also fail if ->read_iter and ->read are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->read_iter || file->f_op->read))
		return warn_unsupported(file, "read");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
	ret = file->f_op->read_iter(&kiocb, &iter);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}

ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret)
		return ret;
	return __kernel_read(file, buf, count, pos);
}
EXPORT_SYMBOL(kernel_read);
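
/*
 * For example, an in-kernel user that opened a file with filp_open() can
 * read from it with kernel_read(); on success @pos is advanced by the
 * number of bytes copied. A hypothetical sketch (error handling omitted):
 *
 *	loff_t pos = 0;
 *	char buf[64];
 *	ssize_t n = kernel_read(file, buf, sizeof(buf), &pos);
 */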

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;

	if (file->f_op->read)
		ret = file->f_op->read(file, buf, count, pos);
	else if (file->f_op->read_iter)
		ret = new_sync_read(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_access(file);
		add_rchar(current, ret);
	}
	inc_syscr(current);
	return ret;
}

static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = (ppos ? *ppos : 0);
	iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ret > 0 && ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *pos)
{
	struct kiocb kiocb;
	ssize_t ret;

	if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	/*
	 * Also fail if ->write_iter and ->write are both wired up as that
	 * implies very convoluted semantics.
	 */
	if (unlikely(!file->f_op->write_iter || file->f_op->write))
		return warn_unsupported(file, "write");

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos ? *pos : 0;
	ret = file->f_op->write_iter(&kiocb, from);
	if (ret > 0) {
		if (pos)
			*pos = kiocb.ki_pos;
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}

/* caller is responsible for file_start_write/file_end_write */
ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
{
	struct kvec iov = {
		.iov_base	= (void *)buf,
		.iov_len	= min_t(size_t, count, MAX_RW_COUNT),
	};
	struct iov_iter iter;
	iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
	return __kernel_write_iter(file, &iter, pos);
}
/*
 * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()",
 * but autofs is one of the few internal kernel users that actually
 * wants this _and_ can be built as a module. So we need to export
 * this symbol for autofs, even though it really isn't appropriate
 * for any other kernel modules.
 */
EXPORT_SYMBOL_GPL(__kernel_write);

ssize_t kernel_write(struct file *file, const void *buf, size_t count,
		     loff_t *pos)
{
	ssize_t ret;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret)
		return ret;

	file_start_write(file);
	ret = __kernel_write(file, buf, count, pos);
	file_end_write(file);
	return ret;
}
EXPORT_SYMBOL(kernel_write);
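
/*
 * kernel_write() is the mirror image for in-kernel writers: it performs the
 * permission checks and the write-freeze bracketing that direct callers of
 * __kernel_write() would otherwise have to do themselves. A hypothetical
 * sketch appending a short string to an already-opened file:
 *
 *	loff_t pos = i_size_read(file_inode(file));
 *	ssize_t n = kernel_write(file, "done\n", 5, &pos);
 */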

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret)
		return ret;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	file_start_write(file);
	if (file->f_op->write)
		ret = file->f_op->write(file, buf, count, pos);
	else if (file->f_op->write_iter)
		ret = new_sync_write(file, buf, count, pos);
	else
		ret = -EINVAL;
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	file_end_write(file);
	return ret;
}

/* file_ppos returns &file->f_pos or NULL if file is stream */
static inline loff_t *file_ppos(struct file *file)
{
	return file->f_mode & FMODE_STREAM ? NULL : &file->f_pos;
}

ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
{
	CLASS(fd_pos, f)(fd);
	ssize_t ret = -EBADF;

	if (!fd_empty(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_read(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
	}
	return ret;
}

SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
	return ksys_read(fd, buf, count);
}

ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
{
	CLASS(fd_pos, f)(fd);
	ssize_t ret = -EBADF;

	if (!fd_empty(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_write(fd_file(f), buf, count, ppos);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
	}

	return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
		size_t, count)
{
	return ksys_write(fd, buf, count);
}

ssize_t ksys_pread64(unsigned int fd, char __user *buf, size_t count,
		     loff_t pos)
{
	if (pos < 0)
		return -EINVAL;

	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	if (fd_file(f)->f_mode & FMODE_PREAD)
		return vfs_read(fd_file(f), buf, count, &pos);

	return -ESPIPE;
}

SYSCALL_DEFINE4(pread64, unsigned int, fd, char __user *, buf,
		size_t, count, loff_t, pos)
{
	return ksys_pread64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PREAD64)
COMPAT_SYSCALL_DEFINE5(pread64, unsigned int, fd, char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pread64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif

ssize_t ksys_pwrite64(unsigned int fd, const char __user *buf,
		      size_t count, loff_t pos)
{
	if (pos < 0)
		return -EINVAL;

	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;

	if (fd_file(f)->f_mode & FMODE_PWRITE)
		return vfs_write(fd_file(f), buf, count, &pos);

	return -ESPIPE;
}

SYSCALL_DEFINE4(pwrite64, unsigned int, fd, const char __user *, buf,
		size_t, count, loff_t, pos)
{
	return ksys_pwrite64(fd, buf, count, pos);
}

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_PWRITE64)
COMPAT_SYSCALL_DEFINE5(pwrite64, unsigned int, fd, const char __user *, buf,
		       size_t, count, compat_arg_u64_dual(pos))
{
	return ksys_pwrite64(fd, buf, count, compat_arg_u64_glue(pos));
}
#endif

static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = kiocb_set_rw_flags(&kiocb, flags, type);
	if (ret)
		return ret;
	kiocb.ki_pos = (ppos ? *ppos : 0);

	if (type == READ)
		ret = filp->f_op->read_iter(&kiocb, iter);
	else
		ret = filp->f_op->write_iter(&kiocb, iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ppos)
		*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
		loff_t *ppos, int type, rwf_t flags)
{
	ssize_t ret = 0;

	if (flags & ~RWF_HIPRI)
		return -EOPNOTSUPP;

	while (iov_iter_count(iter)) {
		ssize_t nr;

		if (type == READ) {
			nr = filp->f_op->read(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		} else {
			nr = filp->f_op->write(filp, iter_iov_addr(iter),
						iter_iov_len(iter), ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iter_iov_len(iter))
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
			   struct iov_iter *iter)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, &iocb->ki_pos, tot_len);
	if (ret < 0)
		return ret;

	ret = file->f_op->read_iter(iocb, iter);
out:
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iocb_iter_read);

ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
		      rwf_t flags)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->read_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(READ, file, ppos, tot_len);
	if (ret < 0)
		return ret;

	ret = do_iter_readv_writev(file, iter, ppos, READ, flags);
out:
	if (ret >= 0)
		fsnotify_access(file);
	return ret;
}
EXPORT_SYMBOL(vfs_iter_read);

/*
 * Caller is responsible for calling kiocb_end_write() on completion
 * if async iocb was queued.
 */
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
			    struct iov_iter *iter)
{
	size_t tot_len;
	ssize_t ret = 0;

	if (!file->f_op->write_iter)
		return -EINVAL;
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		return 0;
	ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);
	if (ret < 0)
		return ret;

	kiocb_start_write(iocb);
	ret = file->f_op->write_iter(iocb, iter);
	if (ret != -EIOCBQUEUED)
		kiocb_end_write(iocb);
	if (ret > 0)
		fsnotify_modify(file);

	return ret;
}
EXPORT_SYMBOL(vfs_iocb_iter_write);

ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
		       rwf_t flags)
{
	size_t tot_len;
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (!file->f_op->write_iter)
		return -EINVAL;

	tot_len = iov_iter_count(iter);
	if (!tot_len)
		return 0;

	ret = rw_verify_area(WRITE, file, ppos, tot_len);
	if (ret < 0)
		return ret;

	file_start_write(file);
	ret = do_iter_readv_writev(file, iter, ppos, WRITE, flags);
	if (ret > 0)
		fsnotify_modify(file);
	file_end_write(file);

	return ret;
}
EXPORT_SYMBOL(vfs_iter_write);

static ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
			 unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_READ))
		return -EINVAL;

	ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;

	ret = rw_verify_area(READ, file, pos, tot_len);
	if (ret < 0)
		goto out;

	if (file->f_op->read_iter)
		ret = do_iter_readv_writev(file, &iter, pos, READ, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, READ, flags);
out:
	if (ret >= 0)
		fsnotify_access(file);
	kfree(iov);
	return ret;
}

static ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
			  unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	size_t tot_len;
	ssize_t ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov,
			   &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;

	ret = rw_verify_area(WRITE, file, pos, tot_len);
	if (ret < 0)
		goto out;

	file_start_write(file);
	if (file->f_op->write_iter)
		ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags);
	if (ret > 0)
		fsnotify_modify(file);
	file_end_write(file);
out:
	kfree(iov);
	return ret;
}

static ssize_t do_readv(unsigned long fd, const struct iovec __user *vec,
			unsigned long vlen, rwf_t flags)
{
	CLASS(fd_pos, f)(fd);
	ssize_t ret = -EBADF;

	if (!fd_empty(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_readv(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, rwf_t flags)
{
	CLASS(fd_pos, f)(fd);
	ssize_t ret = -EBADF;

	if (!fd_empty(f)) {
		loff_t pos, *ppos = file_ppos(fd_file(f));
		if (ppos) {
			pos = *ppos;
			ppos = &pos;
		}
		ret = vfs_writev(fd_file(f), vec, vlen, ppos, flags);
		if (ret >= 0 && ppos)
			fd_file(f)->f_pos = pos;
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

static inline loff_t pos_from_hilo(unsigned long high, unsigned long low)
{
#define HALF_LONG_BITS (BITS_PER_LONG / 2)
	return (((loff_t)high << HALF_LONG_BITS) << HALF_LONG_BITS) | low;
}
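
/*
 * pos_from_hilo() treats @high and @low as the two halves of a value that is
 * 2 * BITS_PER_LONG bits wide and keeps the low 64 bits of it. The double
 * shift avoids the undefined behaviour of shifting by a full BITS_PER_LONG
 * in one go. For example, on a 32-bit kernel pos_from_hilo(0x1, 0x1000)
 * yields 0x100001000; on a 64-bit kernel the whole offset already fits in
 * @low and @high is shifted out entirely.
 */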

static ssize_t do_preadv(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, loff_t pos, rwf_t flags)
{
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	CLASS(fd, f)(fd);
	if (!fd_empty(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PREAD)
			ret = vfs_readv(fd_file(f), vec, vlen, &pos, flags);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
			  unsigned long vlen, loff_t pos, rwf_t flags)
{
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	CLASS(fd, f)(fd);
	if (!fd_empty(f)) {
		ret = -ESPIPE;
		if (fd_file(f)->f_mode & FMODE_PWRITE)
			ret = vfs_writev(fd_file(f), vec, vlen, &pos, flags);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_readv(fd, vec, vlen, 0);
}

SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	return do_writev(fd, vec, vlen, 0);
}

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_preadv(fd, vec, vlen, pos, 0);
}

SYSCALL_DEFINE6(preadv2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);

	return do_preadv(fd, vec, vlen, pos, flags);
}

SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	return do_pwritev(fd, vec, vlen, pos, 0);
}

SYSCALL_DEFINE6(pwritev2, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h,
		rwf_t, flags)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);

	return do_pwritev(fd, vec, vlen, pos, flags);
}

/*
 * Various compat syscalls. Note that they all pretend to take a native
 * iovec - import_iovec will properly treat those as compat_iovecs based on
 * in_compat_syscall().
 */
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_preadv(fd, vec, vlen, pos, 0);
}
#endif

COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_preadv(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
		rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_readv(fd, vec, vlen, flags);
	return do_preadv(fd, vec, vlen, pos, flags);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos)
{
	return do_pwritev(fd, vec, vlen, pos, 0);
}
#endif

COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	return do_pwritev(fd, vec, vlen, pos, 0);
}

#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
		const struct iovec __user *, vec,
		unsigned long, vlen, loff_t, pos, rwf_t, flags)
{
	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif

COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
		const struct iovec __user *, vec,
		compat_ulong_t, vlen, u32, pos_low, u32, pos_high, rwf_t, flags)
{
	loff_t pos = ((loff_t)pos_high << 32) | pos_low;

	if (pos == -1)
		return do_writev(fd, vec, vlen, flags);
	return do_pwritev(fd, vec, vlen, pos, flags);
}
#endif /* CONFIG_COMPAT */

static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct inode *in_inode, *out_inode;
	struct pipe_inode_info *opipe;
	loff_t pos;
	loff_t out_pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	CLASS(fd, in)(in_fd);
	if (fd_empty(in))
		return -EBADF;
	if (!(fd_file(in)->f_mode & FMODE_READ))
		return -EBADF;
	if (!ppos) {
		pos = fd_file(in)->f_pos;
	} else {
		pos = *ppos;
		if (!(fd_file(in)->f_mode & FMODE_PREAD))
			return -ESPIPE;
	}
	retval = rw_verify_area(READ, fd_file(in), &pos, count);
	if (retval < 0)
		return retval;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;

	/*
	 * Get output file, and verify that it is ok..
	 */
	CLASS(fd, out)(out_fd);
	if (fd_empty(out))
		return -EBADF;
	if (!(fd_file(out)->f_mode & FMODE_WRITE))
		return -EBADF;
	in_inode = file_inode(fd_file(in));
	out_inode = file_inode(fd_file(out));
	out_pos = fd_file(out)->f_pos;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	if (unlikely(pos + count > max)) {
		if (pos >= max)
			return -EOVERFLOW;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (fd_file(in)->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	opipe = get_pipe_info(fd_file(out), true);
	if (!opipe) {
		retval = rw_verify_area(WRITE, fd_file(out), &out_pos, count);
		if (retval < 0)
			return retval;
		retval = do_splice_direct(fd_file(in), &pos, fd_file(out), &out_pos,
					  count, fl);
	} else {
		if (fd_file(out)->f_flags & O_NONBLOCK)
			fl |= SPLICE_F_NONBLOCK;

		retval = splice_file_to_pipe(fd_file(in), opipe, &pos, count, fl);
	}

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(fd_file(in));
		fsnotify_modify(fd_file(out));
		fd_file(out)->f_pos = out_pos;
		if (ppos)
			*ppos = pos;
		else
			fd_file(in)->f_pos = pos;
	}

	inc_syscr(current);
	inc_syscw(current);
	if (pos > max)
		retval = -EOVERFLOW;
	return retval;
}

SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd, off_t __user *, offset, size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd, loff_t __user *, offset, size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(sendfile, int, out_fd, int, in_fd,
		compat_off_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

COMPAT_SYSCALL_DEFINE4(sendfile64, int, out_fd, int, in_fd,
		compat_loff_t __user *, offset, compat_size_t, count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}
#endif

/*
 * Performs necessary checks before doing a file copy
 *
 * Can adjust amount of bytes to copy via @req_count argument.
 * Returns appropriate error code that caller should return or
 * zero in case the copy should be allowed.
 */
static int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t *req_count, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	uint64_t count = *req_count;
	loff_t size_in;
	int ret;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret)
		return ret;

	/*
	 * We allow some filesystems to handle cross sb copy, but passing
	 * a file of the wrong filesystem type to filesystem driver can result
	 * in an attempt to dereference the wrong type of ->private_data, so
	 * avoid doing that until we really have a good reason.
	 *
	 * nfs and cifs define several different file_system_type structures
	 * and several different sets of file_operations, but they all end up
	 * using the same ->copy_file_range() function pointer.
	 */
	if (flags & COPY_FILE_SPLICE) {
		/* cross sb splice is allowed */
	} else if (file_out->f_op->copy_file_range) {
		if (file_in->f_op->copy_file_range !=
		    file_out->f_op->copy_file_range)
			return -EXDEV;
	} else if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) {
		return -EXDEV;
	}

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EOVERFLOW;

	/* Shorten the copy to EOF */
	size_in = i_size_read(inode_in);
	if (pos_in >= size_in)
		count = 0;
	else
		count = min(count, size_in - (uint64_t)pos_in);

	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/* Don't allow overlapped copying within the same file. */
	if (inode_in == inode_out &&
	    pos_out + count > pos_in &&
	    pos_out < pos_in + count)
		return -EINVAL;

	*req_count = count;
	return 0;
}

/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success. When it does so is up to
 * the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	ssize_t ret;
	bool splice = flags & COPY_FILE_SPLICE;
	bool samesb = file_inode(file_in)->i_sb == file_inode(file_out)->i_sb;

	if (flags & ~COPY_FILE_SPLICE)
		return -EINVAL;

	ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
				       flags);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (len == 0)
		return 0;

	file_start_write(file_out);

	/*
	 * Cloning is supported by more file systems, so we implement copy on
	 * same sb using clone, but for filesystems where both clone and copy
	 * are supported (e.g. nfs,cifs), we only call the copy method.
	 */
	if (!splice && file_out->f_op->copy_file_range) {
		ret = file_out->f_op->copy_file_range(file_in, pos_in,
						      file_out, pos_out,
						      len, flags);
	} else if (!splice && file_in->f_op->remap_file_range && samesb) {
		ret = file_in->f_op->remap_file_range(file_in, pos_in,
				file_out, pos_out,
				min_t(loff_t, MAX_RW_COUNT, len),
				REMAP_FILE_CAN_SHORTEN);
		/* fallback to splice */
		if (ret <= 0)
			splice = true;
	} else if (samesb) {
		/* Fallback to splice for same sb copy for backward compat */
		splice = true;
	}

	file_end_write(file_out);

	if (!splice)
		goto done;

	/*
	 * We can get here for same sb copy of filesystems that do not implement
	 * ->copy_file_range() in case filesystem does not support clone or in
	 * case filesystem supports clone but rejected the clone request (e.g.
	 * because it was not block aligned).
	 *
	 * In both cases, fall back to kernel copy so we are able to maintain a
	 * consistent story about which filesystems support copy_file_range()
	 * and which filesystems do not, that will allow userspace tools to
	 * make consistent decisions w.r.t using copy_file_range().
	 *
	 * We also get here if caller (e.g. nfsd) requested COPY_FILE_SPLICE
	 * for server-side-copy between any two sb.
	 *
	 * In any case, we call do_splice_direct() and not splice_file_range(),
	 * without file_start_write() held, to avoid possible deadlocks related
	 * to splicing from input file, while file_start_write() is held on
	 * the output file on a different sb.
	 */
	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
			       min_t(size_t, len, MAX_RW_COUNT), 0);
done:
	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}

	inc_syscr(current);
	inc_syscw(current);

	return ret;
}
EXPORT_SYMBOL(vfs_copy_file_range);
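
/*
 * For example, an in-kernel caller (nfsd does something along these lines
 * for server-side copy) might invoke the helper directly; a positive return
 * may be a short copy rather than an error. A hypothetical sketch:
 *
 *	ssize_t copied = vfs_copy_file_range(src_file, 0, dst_file, 0,
 *					     len, COPY_FILE_SPLICE);
 *	if (copied < 0)
 *		return copied;
 */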

SYSCALL_DEFINE6(copy_file_range, int, fd_in, loff_t __user *, off_in,
		int, fd_out, loff_t __user *, off_out,
		size_t, len, unsigned int, flags)
{
	loff_t pos_in;
	loff_t pos_out;
	ssize_t ret = -EBADF;

	CLASS(fd, f_in)(fd_in);
	if (fd_empty(f_in))
		return -EBADF;

	CLASS(fd, f_out)(fd_out);
	if (fd_empty(f_out))
		return -EBADF;

	if (off_in) {
		if (copy_from_user(&pos_in, off_in, sizeof(loff_t)))
			return -EFAULT;
	} else {
		pos_in = fd_file(f_in)->f_pos;
	}

	if (off_out) {
		if (copy_from_user(&pos_out, off_out, sizeof(loff_t)))
			return -EFAULT;
	} else {
		pos_out = fd_file(f_out)->f_pos;
	}

	if (flags != 0)
		return -EINVAL;

	ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len,
				  flags);
	if (ret > 0) {
		pos_in += ret;
		pos_out += ret;

		if (off_in) {
			if (copy_to_user(off_in, &pos_in, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_in)->f_pos = pos_in;
		}

		if (off_out) {
			if (copy_to_user(off_out, &pos_out, sizeof(loff_t)))
				ret = -EFAULT;
		} else {
			fd_file(f_out)->f_pos = pos_out;
		}
	}
	return ret;
}

/*
 * Don't operate on ranges the page cache doesn't support, and don't exceed the
 * LFS limits. If pos is under the limit it becomes a short access. If it
 * exceeds the limit we return -EFBIG.
 */
int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count)
{
	struct inode *inode = file->f_mapping->host;
	loff_t max_size = inode->i_sb->s_maxbytes;
	loff_t limit = rlimit(RLIMIT_FSIZE);

	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		*count = min(*count, limit - pos);
	}

	if (!(file->f_flags & O_LARGEFILE))
		max_size = MAX_NON_LFS;

	if (unlikely(pos >= max_size))
		return -EFBIG;

	*count = min(*count, max_size - pos);

	return 0;
}
EXPORT_SYMBOL_GPL(generic_write_check_limits);

/* Like generic_write_checks(), but takes size of write instead of iter. */
int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	if (!*count)
		return 0;

	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !((iocb->ki_flags & IOCB_DIRECT) ||
	      (file->f_op->fop_flags & FOP_BUFFER_WASYNC)))
		return -EINVAL;

	return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count);
}
EXPORT_SYMBOL(generic_write_checks_count);

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	loff_t count = iov_iter_count(from);
	int ret;

	ret = generic_write_checks_count(iocb, &count);
	if (ret)
		return ret;

	iov_iter_truncate(from, count);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);
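
/*
 * A typical ->write_iter implementation calls generic_write_checks() first
 * and only proceeds with the (possibly shortened) request. A hypothetical
 * sketch, loosely following what simple filesystems do (myfs_write_iter is
 * a made-up name):
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		ssize_t ret = generic_write_checks(iocb, from);
 *
 *		if (ret <= 0)
 *			return ret;
 *		return generic_perform_write(iocb, from);
 *	}
 */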

/*
 * Performs common checks before doing a file copy/clone
 * from @file_in to @file_out.
 */
int generic_file_rw_checks(struct file *file_in, struct file *file_out)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);

	/* Don't copy dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	return 0;
}

int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t len = iov_iter_count(iter);

	if (!iter_is_ubuf(iter))
		return -EINVAL;

	if (!is_power_of_2(len))
		return -EINVAL;

	if (!IS_ALIGNED(iocb->ki_pos, len))
		return -EINVAL;

	if (!(iocb->ki_flags & IOCB_DIRECT))
		return -EOPNOTSUPP;

	return 0;
}
EXPORT_SYMBOL_GPL(generic_atomic_write_valid);
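
/*
 * Filesystems that support RWF_ATOMIC typically call this from their
 * ->write_iter before issuing the I/O, to reject unaligned or
 * non-power-of-two atomic writes early. A hypothetical sketch:
 *
 *	if (iocb->ki_flags & IOCB_ATOMIC) {
 *		ret = generic_atomic_write_valid(iocb, from);
 *		if (ret)
 *			return ret;
 *	}
 */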