// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>
#include <linux/filelock.h>
#include <linux/iomap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * cifs, btrfs, exfat, ext4 and f2fs use this constant.
 * Hopefully this value will become common to all filesystems.
 */
#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
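
/*
 * Informal userspace sketch (not part of this file's build): a shutdown
 * would be triggered roughly like this, assuming NTFS3_IOC_SHUTDOWN is
 * mirrored in a uapi header or redefined locally with the same value:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	__u32 flags = 0;	// no flags defined yet; must still be a valid pointer
 *	int fd = open("/mnt/ntfs", O_RDONLY);
 *	int err = ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags);
 */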

/*
 * Helper for ntfs_should_use_dio.
 */
static u32 ntfs_dio_alignment(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_resident(ni)) {
		/* Check delalloc. */
		if (!ni->file.run_da.count)
			return 0;
	}

	/* In most cases this is bdev_logical_block_size(bdev). */
	return ni->mi.sbi->bdev_blocksize;
}

/*
 * Returns %true if the given I/O request should be attempted with DIO,
 * or %false if it should fall back to buffered I/O.
 */
static bool ntfs_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	u32 dio_align = ntfs_dio_alignment(inode);

	if (!dio_align)
		return false;

	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}
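
/*
 * Worked example (informal): on a device with 512-byte logical blocks,
 * dio_align == 512; a read at ki_pos == 4096 from a buffer whose
 * address and length are multiples of 512 passes the IS_ALIGNED()
 * check and is attempted as DIO, while ki_pos == 100 forces the
 * buffered fallback.
 */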

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

static int ntfs_ioctl_get_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	if (copy_to_user(buf, sbi->volume.label, FSLABEL_MAX))
		return -EFAULT;

	return 0;
}

static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
{
	u8 user[FSLABEL_MAX] = { 0 };
	int len;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(user, buf, FSLABEL_MAX))
		return -EFAULT;

	len = strnlen(user, FSLABEL_MAX);

	return ntfs_set_label(sbi, user, len);
}
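
/*
 * Informal userspace sketch of the two label ioctls above; they are the
 * generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL from <linux/fs.h>, so
 * stock tools work unchanged:
 *
 *	char label[FSLABEL_MAX] = "backup";
 *	int fd = open("/mnt/ntfs", O_RDONLY);
 *
 *	ioctl(fd, FS_IOC_SETFSLABEL, label);	// needs CAP_SYS_ADMIN
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 */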

/*
 * ntfs_force_shutdown - Helper function, called from ioctl.
 */
static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
{
	int err;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	if (unlikely(ntfs3_forced_shutdown(sb)))
		return 0;

	/* No additional options yet (flags). */
	err = bdev_freeze(sb->s_bdev);
	if (err)
		return err;
	set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
	bdev_thaw(sb->s_bdev);
	return 0;
}

static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
{
	u32 flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(flags, (__u32 __user *)arg))
		return -EFAULT;

	return ntfs_force_shutdown(sb, flags);
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ntfs_i(inode))))
		return -EINVAL;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	case FS_IOC_GETFSLABEL:
		return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
	case FS_IOC_SETFSLABEL:
		return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
	case NTFS3_IOC_SHUTDOWN:
		return ntfs_ioctl_shutdown(sb, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}
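
/*
 * Informal userspace sketch: the creation time filled in above is
 * reachable through statx(2):
 *
 *	struct statx stx;
 *
 *	statx(AT_FDCWD, "/mnt/ntfs/file", 0, STATX_BTIME, &stx);
 *	if (stx.stx_mask & STATX_BTIME)
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */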

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	const loff_t valid = ni->i_valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	err = iomap_zero_range(inode, valid, new_valid - valid, NULL,
			       &ntfs_iomap_ops, &ntfs_iomap_folio_ops, NULL);
	if (err) {
		ni->i_valid = valid;
		ntfs_inode_warn(inode,
				"failed to extend initialized size to %llx.",
				new_valid);
		return err;
	}

	return 0;
}
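
/*
 * Worked example (informal): with i_size == 1M and i_valid == 4K, a
 * write starting at 64K must not expose stale disk blocks in [4K, 64K),
 * so the helper above zeroes exactly that range via iomap_zero_range()
 * before i_valid moves up.
 */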

static void ntfs_filemap_close(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = (u64)vma->vm_pgoff << PAGE_SHIFT;
	u64 to = min_t(u64, i_size_read(inode),
		       from + vma->vm_end - vma->vm_start);

	if (ni->i_valid < to) {
		ni->i_valid = to;
		mark_inode_dirty(inode);
	}
}

/* generic_file_vm_ops, plus a close hook that updates the valid size. */
static const struct vm_operations_struct ntfs_file_vm_ops = {
	.close = ntfs_filemap_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = filemap_page_mkwrite,
};

/*
 * ntfs_file_mmap_prepare - file_operations::mmap_prepare
 */
static int ntfs_file_mmap_prepare(struct vm_area_desc *desc)
{
	struct file *file = desc->file;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT);
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni)) {
		if (rw) {
			ntfs_inode_warn(inode,
					"mmap(write) compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	if (rw) {
		u64 from = (u64)desc->pgoff << PAGE_SHIFT;
		u64 to = min_t(u64, i_size_read(inode),
			       from + vma_desc_size(desc));

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true,
							  NULL, false);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap_prepare(desc);
	if (!err && rw)
		desc->vm_ops = &ntfs_file_vm_ops;
out:
	return err;
}
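
/*
 * Informal note on the rw path above: a shared writable mapping of a
 * sparse file eagerly allocates every cluster it covers, so later page
 * faults cannot hit an unallocated hole; e.g. mapping 1M on a
 * 4K-cluster volume allocates up to 256 clusters up front.
 */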

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		/*
		 * Normal files: increase file size, allocate space.
		 * Sparse/Compressed: increase file size. No space allocated.
		 */
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 new_valid = min_t(u64, ni->i_valid, new_size);

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size_ex(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			       &new_valid, ni->mi.sbi->options->prealloc, NULL,
			       false);
	up_write(&ni->file.run_lock);

	ni->i_valid = new_valid;

	ni_unlock(ni);

	if (err)
		return err;

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		mark_inode_dirty(inode);
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	return 0;
}
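
/*
 * Worked example (informal): truncating a file with i_size == 1M and
 * i_valid == 256K down to 64K clamps new_valid to min(256K, 64K) ==
 * 64K, so a later extension never exposes stale data past that point.
 */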

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp, from;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process an unaligned punch. */
		err = 0;
		if (end > i_size)
			end = i_size;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		from = min_t(loff_t, ni->i_valid, vbo);
		/* Zero head of punch. */
		if (tmp > from) {
			err = iomap_zero_range(inode, from, tmp - from, NULL,
					       &ntfs_iomap_ops,
					       &ntfs_iomap_folio_ops, NULL);
			if (err)
				goto out;
		}

		/* Aligned punch_hole. Deallocate clusters. */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* Zero tail of punch. */
		if (vbo < end_a && end_a < end) {
			err = iomap_zero_range(inode, end_a, end - end_a, NULL,
					       &ntfs_iomap_ops,
					       &ntfs_iomap_folio_ops, NULL);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write out the data that will be shifted, to preserve it
		 * when the page cache is discarded below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Make sure the file is non-resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true, NULL, false);
				if (err)
					goto out;
			}

			/*
			 * Moving up 'valid size'.
			 */
			err = ntfs_extend_initialized_size(
				file, ni, (u64)cend_v << cluster_bits);
			if (err)
				goto out;

			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false, NULL, false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true);
			ni_unlock(ni);
			if (err)
				goto out;
			i_size_write(inode, i_size);
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
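
/*
 * Informal userspace sketch of the hole-punch path above (the file must
 * be sparse or compressed, otherwise -EOPNOTSUPP is returned):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/ntfs/big.img", O_RDWR);
 *
 *	// Free [1M, 2M); i_size is unchanged (PUNCH_HOLE requires KEEP_SIZE).
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  1 << 20, 1 << 20);
 */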

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction:
 * common code for ntfs_file_read_iter and ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	size_t bytes = iov_iter_count(iter);
	loff_t valid, i_size, vbo, end;
	unsigned int dio_flags;
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (!bytes)
		return 0; /* skip atime */

	if (is_compressed(ni)) {
		if (iocb->ki_flags & IOCB_DIRECT) {
			ntfs_inode_warn(
				inode, "direct i/o + compressed not supported");
			return -EOPNOTSUPP;
		}
		/* Turn off readahead for compressed files. */
		file->f_ra.ra_pages = 0;
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!(iocb->ki_flags & IOCB_DIRECT) ||
	    !ntfs_should_use_dio(iocb, iter)) {
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, iter);
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	vbo = iocb->ki_pos;
	end = vbo + bytes;
	dio_flags = 0;
	valid = ni->i_valid;
	i_size = inode->i_size;

	if (vbo < valid) {
		if (valid < end) {
			/* Read crosses the 'valid' size. */
			dio_flags |= IOMAP_DIO_FORCE_WAIT;
		}

		if (ni->file.run_da.count) {
			/* Direct I/O is not compatible with delalloc. */
			err = ni_allocate_da_blocks(ni);
			if (err)
				goto out;
		}

		err = iomap_dio_rw(iocb, iter, &ntfs_iomap_ops, NULL, dio_flags,
				   NULL, 0);

		if (err <= 0)
			goto out;
		end = vbo + err;
		if (valid < end) {
			size_t to_zero = end - valid;
			/* Fix iter. */
			iov_iter_revert(iter, to_zero);
			iov_iter_zero(to_zero, iter);
		}
	} else if (vbo < i_size) {
		if (end > i_size)
			bytes = i_size - vbo;
		iov_iter_zero(bytes, iter);
		iocb->ki_pos += bytes;
		err = bytes;
	}

out:
	inode_unlock_shared(inode);
	file_accessed(iocb->ki_filp);
	return err;
}
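
/*
 * Worked example (informal): with i_valid == 100K and i_size == 1M, a
 * 64K DIO read at 96K crosses the valid size; the part of the result
 * past 100K is replaced with zeroes via iov_iter_revert() +
 * iov_iter_zero() rather than exposing uninitialized disk blocks.
 */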

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ntfs_i(inode))) {
		/* Turn off readahead for compressed files. */
		in->f_ra.ra_pages = 0;
	}

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask | __GFP_ZERO);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page **pages = NULL;
	struct folio *folio;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_objs(struct page *, pages_per_frame, GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame, 0);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					folio = page_folio(pages[ip]);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			folio = page_folio(pages[ip]);
			folio_zero_segment(folio, off, PAGE_SIZE);
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame, 0);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						folio = page_folio(pages[ip]);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			folio = page_folio(pages[ip]);
			cp = copy_folio_from_iter_atomic(
				folio, off, min(tail, bytes), from);
			flush_dcache_folio(folio);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_clear_dirty(folio);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}
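
/*
 * Worked example (informal, assuming NTFS_LZNT_CUNIT == 4, i.e. a
 * 16-cluster compression unit): a 4K-cluster volume gives
 * frame_bits == 16, frame_size == 64K and pages_per_frame == 16 with
 * 4K pages, so the helper above stages every compressed write through
 * whole 64K frames.
 */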

/*
 * check_write_restriction:
 * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret, err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	if (is_compressed(ni)) {
		ret = ntfs_compress_write(iocb, from);
		goto out;
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!(iocb->ki_flags & IOCB_DIRECT) ||
	    !ntfs_should_use_dio(iocb, from)) {
		iocb->ki_flags &= ~IOCB_DIRECT;

		ret = iomap_file_buffered_write(iocb, from, &ntfs_iomap_ops,
						&ntfs_iomap_folio_ops, NULL);
		inode_unlock(inode);

		if (likely(ret > 0))
			ret = generic_write_sync(iocb, ret);

		return ret;
	}

	if (ni->file.run_da.count) {
		/* Direct I/O is not compatible with delalloc. */
		ret = ni_allocate_da_blocks(ni);
		if (ret)
			goto out;
	}

	ret = iomap_dio_rw(iocb, from, &ntfs_iomap_ops, NULL, 0, NULL, 0);

	if (ret == -ENOTBLK) {
		/*
		 * iomap_dio_rw() returns -ENOTBLK on page invalidation
		 * failure for writes; the caller needs to fall back to
		 * buffered I/O in this case.
		 */
		ret = 0;
	}

	if (ret >= 0 && iov_iter_count(from)) {
		loff_t offset = iocb->ki_pos, endbyte;

		iocb->ki_flags &= ~IOCB_DIRECT;
		err = iomap_file_buffered_write(iocb, from, &ntfs_iomap_ops,
						&ntfs_iomap_folio_ops, NULL);
		if (err < 0) {
			ret = err;
			goto out;
		}

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics when we fall back to buffered I/O to
		 * complete the request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(inode->i_mapping, offset,
						   endbyte);
		if (err) {
			ret = err;
			goto out;
		}

		invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
					 endbyte >> PAGE_SHIFT);
	}

out:
	inode_unlock(inode);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 *
 * Called when an inode is released. Note that this is different
 * from ntfs_file_open: open gets called at every open, but release
 * gets called only when all references to an open file are closed.
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	int err;
	struct ntfs_inode *ni;

	if (!(file->f_mode & FMODE_WRITE) ||
	    atomic_read(&inode->i_writecount) != 1 ||
	    inode->i_ino == MFT_REC_MFT) {
		return 0;
	}

	/* Close the last writer on the inode. */
	ni = ntfs_i(inode);

	/* Allocate delayed blocks (clusters). */
	err = ni_allocate_da_blocks(ni);
	if (err)
		goto out;

	if (ni->mi.sbi->options->prealloc) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		/* Deallocate preallocated. */
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
out:
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(ni)))
		return -EINVAL;

	if (is_compressed(ni)) {
		/* Unfortunately, 'cp -r' mishandles compressed clusters. */
		ntfs_inode_warn(inode,
				"fiemap is not supported for compressed file");
		return -EOPNOTSUPP;
	}

	if (S_ISDIR(inode->i_mode)) {
		/* TODO: add support for dirs (ATTR_ALLOC). */
		ntfs_inode_warn(inode,
				"fiemap is not supported for directories");
		return -EOPNOTSUPP;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ntfs_inode_warn(inode, "fiemap(xattr) is not supported");
		return -EOPNOTSUPP;
	}

	inode_lock_shared(inode);

	err = iomap_fiemap(inode, fieinfo, start, len, &ntfs_iomap_ops);

	inode_unlock_shared(inode);
	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

/*
 * ntfs_file_fsync - file_operations::fsync
 */
int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	int err, ret;

	if (unlikely(ntfs3_forced_shutdown(sb)))
		return -EIO;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	ret = write_inode_now(inode, !datasync);

	if (!ret)
		ret = ni_write_parents(ntfs_i(inode), !datasync);

	if (!ret) {
		ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
		ntfs_update_mftmirr(sbi);
	}

	err = sync_blockdev(sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (!ret)
		blkdev_issue_flush(sb->s_bdev);
	return ret;
}

/*
 * ntfs_llseek - file_operations::llseek
 */
static loff_t ntfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t maxbytes = ntfs_get_maxbytes(ni);
	loff_t ret;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		inode_lock_shared(inode);
		/* Scan file for hole or data. */
		ret = ni_seek_data_or_hole(ni, offset, whence == SEEK_DATA);
		inode_unlock_shared(inode);

		if (ret >= 0)
			ret = vfs_setpos(file, ret, maxbytes);
	} else {
		ret = generic_file_llseek_size(file, offset, whence, maxbytes,
					       i_size_read(inode));
	}
	return ret;
}
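
/*
 * Informal userspace sketch of the SEEK_DATA/SEEK_HOLE support above:
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);		// first hole at/after 0
 *	off_t data = lseek(fd, hole, SEEK_DATA);	// next data after it
 *
 * For a fully-allocated file SEEK_HOLE returns i_size; offsets at or
 * past EOF fail with -ENXIO.
 */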

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= ntfs_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap_prepare	= ntfs_file_mmap_prepare,
	.open		= ntfs_file_open,
	.fsync		= ntfs_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
	.setlease	= generic_setlease,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
	.setlease	= generic_setlease,
};
#endif
// clang-format on