// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 *  Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

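/*
 * ntfs_ioctl_fitrim - Handle the FITRIM ioctl.
 *
 * A hedged sketch of the userspace side (fstrim(8) issues the
 * equivalent); struct fstrim_range comes from <linux/fs.h>:
 *
 *	struct fstrim_range r = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,	// raised to the discard granularity below
 *	};
 *	ioctl(fd, FITRIM, &r);	// on return, r.len holds bytes trimmed
 */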
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

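/*
 * For 32-bit tasks the user pointer in @arg must go through
 * compat_ptr() before the shared handler dereferences it; the
 * fstrim_range payload itself (three __u64 fields) has the same
 * layout for 32-bit and 64-bit userspace.
 */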
#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
				 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

	generic_fillattr(idmap, request_mask, inode, stat);

	return 0;
}

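/*
 * ntfs_extend_initialized_size - Zero the range [valid, new_valid).
 *
 * NTFS tracks an "initialized" (valid) size separately from the file
 * size: bytes past the valid size must read back as zeros.  Before the
 * valid size can move forward, the gap has to be zeroed on disk, which
 * the page-at-a-time loop below does through the page cache.  Sparse
 * clusters are skipped, since unallocated clusters read as zeros
 * already.
 */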
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (valid >= new_valid)
		return 0;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));

	for (;;) {
		u32 zerofrom, len;
		struct folio *folio;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
		if (err)
			goto out;

		folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

		err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

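	/*
	 * Walk the range one page at a time.  For each page, map and
	 * dirty every buffer that overlaps [from, to) so that writeback
	 * pushes the zeroed bytes to disk; unmapped buffers are holes
	 * and already read back as zeros, so they need no work.
	 */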
	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

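		/*
		 * Allocating on-disk space from the page-fault path is
		 * awkward, so for a writable mapping of a sparse file
		 * the backing clusters for the whole mapped range are
		 * allocated up front, and the initialized size is
		 * extended to cover it.
		 */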
		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

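/*
 * ntfs_extend - Grow a file for an operation ending at @pos + @count.
 *
 * Raises i_size when the range ends past EOF, zeroes out to @pos when
 * a write starts beyond the current valid size, and, for writes to
 * sparse files, preallocates the affected clusters.  Called with
 * @file == NULL for a plain truncate-up from ntfs_setattr(), and with
 * @file set from the write path.
 */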
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse files.
		 * TODO: merge this fragment with the fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters can take a long time.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but do not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

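/*
 * ntfs_truncate - Shrink the data attribute of a regular file to
 * @new_size.
 *
 * Called from ntfs_setattr() with i_rwsem held; directories and other
 * special files are left untouched.  The valid size is clamped to the
 * new size, rounded up to a filesystem block.
 */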
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs3's fallocate file
 * operation, which gets called from the sys_fallocate() system call.
 * Userspace requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is
 * set, we just allocate clusters without zeroing them out. Otherwise
 * we allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for directories. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/* Nothing extra to check. */
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

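	/*
	 * For the destructive modes, the invalidate lock taken above
	 * keeps page faults and direct I/O out while each branch below
	 * flushes and drops the affected page cache before changing the
	 * on-disk runs.
	 */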
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process a punch that is not frame-aligned. */
		err = 0;
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

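		/*
		 * Worked example, assuming a 64K frame: punching
		 * [vbo = 10K, end = 200K) gives vbo_a = 64K and
		 * end_a = 192K, so the head [10K, 64K) and the tail
		 * [192K, 200K) are zeroed by hand, and only the aligned
		 * middle [64K, 192K) becomes a real hole.
		 */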
		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole. */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve it
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure the file is non-resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* xfstest generic/213 expects -ENOSPC here, not -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters can take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True means keep the preallocated space. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

/*
 * check_read_restriction - common code for ntfs_file_read_iter and
 * ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	ssize_t err;

	err = check_read_restriction(inode);
	if (err)
		return err;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: 0 on success, with @pages filled with the locked pages of
 * one compression frame, or -ENOMEM with all pages released.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    gfp_mask);
		if (IS_ERR(folio)) {
			while (npages--) {
				folio = page_folio(pages[npages]);
				folio_unlock(folio);
				folio_put(folio);
			}

			return -ENOMEM;
		}

		if (!folio_test_uptodate(folio))
			*frame_uptodate = false;

		pages[npages] = &folio->page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page **pages = NULL;
	struct folio *folio;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

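	/*
	 * Compressed data is handled in frames of 2^NTFS_LZNT_CUNIT
	 * (i.e. 16) clusters: 512-byte clusters give 8K frames, 4K
	 * clusters give 64K frames.  A frame is the unit of
	 * (de)compression, so all work below is widened to whole
	 * frames.
	 */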
	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		/* Mask as u64 so offsets above 4G keep their high bits. */
		frame_vbo = valid & ~((u64)frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					folio = page_folio(pages[ip]);
					folio_unlock(folio);
					folio_put(folio);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			folio = page_folio(pages[ip]);
			folio_zero_segment(folio, off, PAGE_SIZE);
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
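	/*
	 * Each pass stages one compression frame in the page cache,
	 * copies the user bytes into it, and writes the frame out
	 * through ni_write_frame(), which compresses it.  A frame that
	 * is only partially overwritten is read in first so the
	 * remaining bytes survive recompression.
	 */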
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~((u64)frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						folio = page_folio(pages[ip]);
						folio_unlock(folio);
						folio_put(folio);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			folio = page_folio(pages[ip]);
			cp = copy_folio_from_iter_atomic(folio, off,
							 min(tail, bytes), from);
			flush_dcache_folio(folio);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			folio = page_folio(pages[ip]);
			folio_clear_dirty(folio);
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			folio_put(folio);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid soft lockups if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}

/*
 * check_write_restriction - common code for ntfs_file_write_iter and
 * ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	ssize_t ret;
	int err;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

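	/*
	 * Order matters below: the restriction and generic checks run
	 * first, then ntfs_extend() grows the file (allocating clusters
	 * and zeroing up to the write position) before any user data is
	 * copied in.
	 */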
	ret = check_write_restriction(inode);
	if (ret)
		goto out;

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)
	   /*
	    * The MFT is the only file that uses ntfs_file_operations
	    * without init_rwsem(&ni->file.run_lock) having been called
	    * explicitly, so check for it here as well.
	    */
	    && inode->i_ino != MFT_REC_MFT) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    i_size_read(inode), &ni->i_valid, false,
				    NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *file, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t err;
	struct inode *inode = file_inode(file);

	err = check_write_restriction(inode);
	if (err)
		return err;

	return iter_file_splice_write(pipe, file, ppos, len, flags);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.splice_write	= ntfs_file_splice_write,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on