// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

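/*
 * Returns whether the inode can currently be used with iomap direct I/O.
 * Encrypted and verity files, data=journal inodes, and inline-data inodes
 * all have to go through the buffered path instead.
 */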
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

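/*
 * Direct I/O reads are performed under the shared inode lock (i_rwsem).
 * If the inode cannot do direct I/O here, the read drops IOCB_DIRECT and
 * is retried through the buffered path instead.
 */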
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed
		 * on the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

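/*
 * An extending write is one that grows either the VFS-visible size
 * (i_size) or the on-disk size (i_disksize). Both cases need the
 * exclusive inode lock, plus orphan-list protection against a crash
 * in the middle of the extension.
 */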
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized
	 * or not. To exclude unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

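/*
 * Common write checks: reject writes to immutable inodes, run the generic
 * VFS checks, and clamp the iterator so that a write to a block-mapped
 * (non-extent) file never crosses s_bitmap_maxbytes.
 */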
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	ext4_fc_start_update(inode);
	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	ext4_fc_stop_update(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

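/*
 * Post-I/O fixup for size-extending writes: update i_size/i_disksize to
 * cover what was actually written, truncate any blocks that were allocated
 * beyond the written range, and remove the inode from the orphan list it
 * was placed on before the I/O started.
 */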
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;
	int ret;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written)) {
		ret = ext4_mark_inode_dirty(handle, inode);
		if (unlikely(ret)) {
			written = ret;
			ext4_journal_stop(handle);
			goto truncate;
		}
	}

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try
		 * to remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

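/*
 * iomap direct I/O completion handler. Unwritten extents covering a
 * successful write are converted to written here, once the data has
 * safely reached the disk.
 */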
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with the shared lock acquired and then see
 * if any condition requires an exclusive inode lock. If so, we restart the
 * whole operation by releasing the shared lock and acquiring the exclusive
 * lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since extending
 *   requires updating the inode's i_disksize and/or orphan handling under
 *   the exclusive lock.
 *
 * - In practice, shared locking is therefore only used for overwrites;
 *   everything else switches to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

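/*
 * Summary of the lock choices made below:
 *
 *	aligned overwrite within i_size	-> shared i_rwsem
 *	extending write			-> exclusive i_rwsem + orphan list
 *	unaligned write			-> exclusive i_rwsem + inode_dio_wait()
 *
 * Anything the iomap direct I/O path cannot handle falls back to
 * ext4_buffered_write_iter().
 */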
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with the shared inode lock unless it is
	 * unaligned IO, which needs the exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	/* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
	if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
		ret = -EAGAIN;
		goto out;
	}

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IO must be serialized against one another, as
	 * zeroing of partial blocks by two competing unaligned IOs can
	 * result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we do not need to wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may become a no-op anyway, since we
	 * start with the exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ext4_fc_start_update(inode);
		ret = ext4_orphan_add(handle, inode);
		ext4_fc_stop_update(inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is in an attempt to preserve the expected
		 * direct I/O semantics in the case we fall back to buffered
		 * I/O to complete the I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
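/*
 * DAX writes always take the exclusive inode lock. If the write extends
 * past i_disksize, the inode is put on the orphan list first so that a
 * crash mid-write cannot leave blocks allocated beyond the recorded size.
 */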
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);
	else
		return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files, nor
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

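/*
 * Called on every open of a regular file. This records the mount point in
 * the superblock on first use, lets fscrypt and fsverity veto the open,
 * attaches the jbd2_inode for writers, and marks the file as supporting
 * RWF_NOWAIT and async buffered reads.
 */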
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};