1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 */
7
8 #include <linux/buffer_head.h>
9 #include <linux/fs.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
15 #include <linux/iomap.h>
16
17 #include "debug.h"
18 #include "ntfs.h"
19 #include "ntfs_fs.h"
20
21 /*
22 * ntfs_read_mft - Read record and parse MFT.
23 */
static struct inode *ntfs_read_mft(struct inode *inode,
				   const struct cpu_str *name,
				   const struct MFT_REF *ref)
{
	int err = 0;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	mode_t mode = 0;
	struct ATTR_STD_INFO5 *std5 = NULL;
	struct ATTR_LIST_ENTRY *le;
	struct ATTRIB *attr;
	bool is_match = false;	/* 'name' matched one of the file names. */
	bool is_root = false;	/* $I30 index root seen (directory). */
	bool is_dir;
	unsigned long ino = inode->i_ino;
	u32 rp_fa = 0, asize, t32;
	u16 roff, rsize, names = 0, links = 0;
	const struct ATTR_FILE_NAME *fname = NULL;
	const struct INDEX_ROOT *root = NULL;
	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
	u64 t64;
	struct MFT_REC *rec;
	struct runs_tree *run;
	struct timespec64 ts;

	inode->i_op = NULL;
	/* Setup 'uid' and 'gid' */
	inode->i_uid = sbi->options->fs_uid;
	inode->i_gid = sbi->options->fs_gid;

	err = mi_init(&ni->mi, sbi, ino);
	if (err)
		goto out;

	if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
		/*
		 * Bootstrap: reading $MFT itself during mount. Seed its run
		 * with the first clusters so mi_read() can map vcn->lcn
		 * before $MFT has been fully parsed.
		 */
		t64 = sbi->mft.lbo >> sbi->cluster_bits;
		t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
		sbi->mft.ni = ni;
		init_rwsem(&ni->file.run_lock);

		if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
			err = -ENOMEM;
			goto out;
		}
	}

	err = mi_read(&ni->mi, ino == MFT_REC_MFT);

	if (err)
		goto out;

	rec = ni->mi.mrec;

	if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
		/* Skip sequence/in-use validation while replaying the log. */
		;
	} else if (ref->seq != rec->seq) {
		err = -EINVAL;
		ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
			 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
		goto out;
	} else if (!is_rec_inuse(rec)) {
		err = -ESTALE;
		ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
		goto out;
	}

	if (le32_to_cpu(rec->total) != sbi->record_size) {
		/* Bad inode? */
		err = -EINVAL;
		goto out;
	}

	if (!is_rec_base(rec)) {
		/* Only base records may be opened as inodes. */
		err = -EINVAL;
		goto out;
	}

	/* Record should contain $I30 root. */
	is_dir = rec->flags & RECORD_FLAG_DIR;

	/* MFT_REC_MFT is not a dir */
	if (is_dir && ino == MFT_REC_MFT) {
		err = -EINVAL;
		goto out;
	}

	inode->i_generation = le16_to_cpu(rec->seq);

	/* Enumerate all struct Attributes MFT. */
	le = NULL;
	attr = NULL;

	/*
	 * To reduce tab pressure use goto instead of
	 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
	 */
next_attr:
	run = NULL;
	err = -EINVAL;
	attr = ni_enum_attr_ex(ni, attr, &le, NULL);
	if (!attr)
		goto end_enum;

	if (le && le->vcn) {
		/* This is non primary attribute segment. Ignore if not MFT. */
		if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
			goto next_attr;

		run = &ni->file.run;
		asize = le32_to_cpu(attr->size);
		goto attr_unpack_run;
	}

	/* Resident offset/size are only meaningful for resident attributes. */
	roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
	rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
	asize = le32_to_cpu(attr->size);

	/*
	 * Really this check was done in 'ni_enum_attr_ex' -> ... 'mi_enum_attr'.
	 * It is not critical to check this case again.
	 */
	if (attr->name_len &&
	    sizeof(short) * attr->name_len + le16_to_cpu(attr->name_off) >
		    asize)
		goto out;

	if (attr->non_res) {
		/* data_size and valid_size must not exceed alloc_size. */
		t64 = le64_to_cpu(attr->nres.alloc_size);
		if (le64_to_cpu(attr->nres.data_size) > t64 ||
		    le64_to_cpu(attr->nres.valid_size) > t64)
			goto out;
	}

	switch (attr->type) {
	case ATTR_STD:
		if (attr->non_res ||
		    asize < sizeof(struct ATTR_STD_INFO) + roff ||
		    rsize < sizeof(struct ATTR_STD_INFO))
			goto out;

		/* Only the first $STANDARD_INFORMATION is used. */
		if (std5)
			goto next_attr;

		std5 = Add2Ptr(attr, roff);

		nt2kernel(std5->cr_time, &ni->i_crtime);
		nt2kernel(std5->a_time, &ts);
		inode_set_atime_to_ts(inode, ts);
		nt2kernel(std5->c_time, &ts);
		inode_set_ctime_to_ts(inode, ts);
		nt2kernel(std5->m_time, &ts);
		inode_set_mtime_to_ts(inode, ts);

		ni->std_fa = std5->fa;

		/* The v5 layout additionally carries the security id. */
		if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
		    rsize >= sizeof(struct ATTR_STD_INFO5))
			ni->std_security_id = std5->security_id;
		goto next_attr;

	case ATTR_LIST:
		if (attr->name_len || le || ino == MFT_REC_LOG)
			goto out;

		err = ntfs_load_attr_list(ni, attr);
		if (err)
			goto out;

		/* Restart enumeration now that the attribute list is loaded. */
		le = NULL;
		attr = NULL;
		goto next_attr;

	case ATTR_NAME:
		if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
			goto out;

		names += 1;
		fname = Add2Ptr(attr, roff);
		/* DOS names do not contribute to the link count. */
		if (fname->type == FILE_NAME_DOS)
			goto next_attr;

		links += 1;
		if (name && name->len == fname->name_len &&
		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
					NULL, false))
			is_match = true;

		goto next_attr;

	case ATTR_DATA:
		if (is_dir) {
			/* Ignore data attribute in dir record. */
			goto next_attr;
		}

		if (ino == MFT_REC_BADCLUST && !attr->non_res)
			goto next_attr;

		/*
		 * Only the unnamed stream (or $BadClus:$Bad / $Secure:$SDS)
		 * is the file's main data; named streams are skipped here.
		 */
		if (attr->name_len &&
		    ((ino != MFT_REC_BADCLUST || !attr->non_res ||
		      attr->name_len != ARRAY_SIZE(BAD_NAME) ||
		      memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
		     (ino != MFT_REC_SECURE || !attr->non_res ||
		      attr->name_len != ARRAY_SIZE(SDS_NAME) ||
		      memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
			/* File contains stream attribute. Ignore it. */
			goto next_attr;
		}

		if (is_attr_sparsed(attr))
			ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;

		if (is_attr_compressed(attr))
			ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;

		if (is_attr_encrypted(attr))
			ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;

		if (!attr->non_res) {
			ni->i_valid = inode->i_size = rsize;
			inode_set_bytes(inode, rsize);
		}

		mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);

		if (!attr->non_res) {
			ni->ni_flags |= NI_FLAG_RESIDENT;
			goto next_attr;
		}

		inode_set_bytes(inode, attr_ondisk_size(attr));

		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
		inode->i_size = le64_to_cpu(attr->nres.data_size);
		if (!attr->nres.alloc_size)
			goto next_attr;

		run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run :
					      &ni->file.run;
		break;

	case ATTR_ROOT:
		if (attr->non_res)
			goto out;

		root = Add2Ptr(attr, roff);

		/* Only the $I30 (directory) index is handled here. */
		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
			goto next_attr;

		if (root->type != ATTR_NAME ||
		    root->rule != NTFS_COLLATION_TYPE_FILENAME)
			goto out;

		if (!is_dir)
			goto next_attr;

		is_root = true;
		ni->ni_flags |= NI_FLAG_DIR;

		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
		if (err)
			goto out;

		/* Before the root dentry exists, allow full access. */
		mode = sb->s_root ?
			       (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
			       (S_IFDIR | 0777);
		goto next_attr;

	case ATTR_ALLOC:
		if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
			goto next_attr;

		inode->i_size = le64_to_cpu(attr->nres.data_size);
		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
		inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));

		run = &ni->dir.alloc_run;
		break;

	case ATTR_BITMAP:
		if (ino == MFT_REC_MFT) {
			if (!attr->non_res)
				goto out;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
			/* 0x20000000 = 2^32 / 8 */
			if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
				goto out;
#endif
			run = &sbi->mft.bitmap.run;
			break;
		} else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
			   !memcmp(attr_name(attr), I30_NAME,
				   sizeof(I30_NAME)) &&
			   attr->non_res) {
			run = &ni->dir.bitmap_run;
			break;
		}
		goto next_attr;

	case ATTR_REPARSE:
		if (attr->name_len)
			goto next_attr;

		rp_fa = ni_parse_reparse(ni, attr, &rp);
		switch (rp_fa) {
		case REPARSE_LINK:
			/*
			 * Normal symlink.
			 * Assume one unicode symbol == one utf8.
			 */
			inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
							    .PrintNameLength) /
					sizeof(u16);
			ni->i_valid = inode->i_size;
			/* Clear directory bit. */
			if (ni->ni_flags & NI_FLAG_DIR) {
				indx_clear(&ni->dir);
				memset(&ni->dir, 0, sizeof(ni->dir));
				ni->ni_flags &= ~NI_FLAG_DIR;
			} else {
				run_close(&ni->file.run);
			}
			mode = S_IFLNK | 0777;
			is_dir = false;
			if (attr->non_res) {
				run = &ni->file.run;
				goto attr_unpack_run; // Double break.
			}
			break;

		case REPARSE_COMPRESSED:
			break;

		case REPARSE_DEDUPLICATED:
			break;
		}
		goto next_attr;

	case ATTR_EA_INFO:
		if (!attr->name_len &&
		    resident_data_ex(attr, sizeof(struct EA_INFO))) {
			ni->ni_flags |= NI_FLAG_EA;
			/*
			 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
			 */
			inode->i_mode = mode;
			ntfs_get_wsl_perm(inode);
			mode = inode->i_mode;
		}
		goto next_attr;

	default:
		goto next_attr;
	}

attr_unpack_run:
	/* Decode the mapping pairs of a non-resident attribute into 'run'. */
	roff = le16_to_cpu(attr->nres.run_off);

	if (roff > asize) {
		err = -EINVAL;
		goto out;
	}

	t64 = le64_to_cpu(attr->nres.svcn);

	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
			    t64, Add2Ptr(attr, roff), asize - roff);
	if (err < 0)
		goto out;
	err = 0;
	goto next_attr;

end_enum:

	/* $STANDARD_INFORMATION is mandatory for a valid record. */
	if (!std5)
		goto out;

	if (is_bad_inode(inode))
		goto out;

	/* Caller asked for a specific name that was not found. */
	if (!is_match && name) {
		err = -ENOENT;
		goto out;
	}

	if (std5->fa & FILE_ATTRIBUTE_READONLY)
		mode &= ~0222;

	if (!names) {
		err = -EINVAL;
		goto out;
	}

	if (names != le16_to_cpu(rec->hard_links)) {
		/* Correct minor error on the fly. Do not mark inode as dirty. */
		ntfs_inode_warn(inode, "Correct links count -> %u.", names);
		rec->hard_links = cpu_to_le16(names);
		ni->mi.dirty = true;
	}

	set_nlink(inode, links);

	if (S_ISDIR(mode)) {
		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;

		/*
		 * Dot and dot-dot should be included in count but was not
		 * included in enumeration.
		 * Usually a hard links to directories are disabled.
		 */
		inode->i_op = &ntfs_dir_inode_operations;
		inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
				       &ntfs_legacy_dir_operations :
				       &ntfs_dir_operations;
		ni->i_valid = 0;
	} else if (S_ISLNK(mode)) {
		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
		inode->i_op = &ntfs_link_inode_operations;
		inode->i_fop = NULL;
		inode_nohighmem(inode);
	} else if (S_ISREG(mode)) {
		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
		inode->i_op = &ntfs_file_inode_operations;
		inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
				       &ntfs_legacy_file_operations :
				       &ntfs_file_operations;
		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
							      &ntfs_aops;
		if (ino != MFT_REC_MFT)
			init_rwsem(&ni->file.run_lock);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
		   S_ISSOCK(mode)) {
		inode->i_op = &ntfs_special_inode_operations;
		init_special_inode(inode, mode, inode->i_rdev);
	} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
		   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
		/* Records in $Extend are neither files nor general directories. */
		inode->i_op = &ntfs_file_inode_operations;
		mode = S_IFREG;
		init_rwsem(&ni->file.run_lock);
	} else {
		err = -EINVAL;
		goto out;
	}

	if ((sbi->options->sys_immutable &&
	     (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
	    !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
		inode->i_flags |= S_IMMUTABLE;
	} else {
		inode->i_flags &= ~S_IMMUTABLE;
	}

	inode->i_mode = mode;
	if (!(ni->ni_flags & NI_FLAG_EA)) {
		/* If no xattr then no security (stored in xattr). */
		inode->i_flags |= S_NOSEC;
	}

	/* Undo the bootstrap linkage made above. */
	if (ino == MFT_REC_MFT && !sb->s_root)
		sbi->mft.ni = NULL;

	unlock_new_inode(inode);

	return inode;

out:
	if (ino == MFT_REC_MFT && !sb->s_root)
		sbi->mft.ni = NULL;

	iget_failed(inode);
	return ERR_PTR(err);
}
508
509 /*
510 * ntfs_test_inode
511 *
512 * Return: 1 if match.
513 */
ntfs_test_inode(struct inode * inode,void * data)514 static int ntfs_test_inode(struct inode *inode, void *data)
515 {
516 struct MFT_REF *ref = data;
517
518 return ino_get(ref) == inode->i_ino;
519 }
520
ntfs_set_inode(struct inode * inode,void * data)521 static int ntfs_set_inode(struct inode *inode, void *data)
522 {
523 const struct MFT_REF *ref = data;
524
525 inode->i_ino = ino_get(ref);
526 return 0;
527 }
528
ntfs_iget5(struct super_block * sb,const struct MFT_REF * ref,const struct cpu_str * name)529 struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
530 const struct cpu_str *name)
531 {
532 struct inode *inode;
533
534 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
535 (void *)ref);
536 if (unlikely(!inode))
537 return ERR_PTR(-ENOMEM);
538
539 /* If this is a freshly allocated inode, need to read it now. */
540 if (inode_state_read_once(inode) & I_NEW)
541 inode = ntfs_read_mft(inode, name, ref);
542 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
543 /*
544 * Sequence number is not expected.
545 * Looks like inode was reused but caller uses the old reference
546 */
547 iput(inode);
548 inode = ERR_PTR(-ESTALE);
549 }
550
551 if (IS_ERR(inode))
552 ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);
553
554 return inode;
555 }
556
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
	struct ntfs_inode *ni = ntfs_i(mapping->host);

	/*
	 * We can get here for an inline (resident) file via the FIBMAP
	 * ioctl; such files have no block mapping at all.
	 */
	if (is_resident(ni))
		return 0;

	/*
	 * With pending delalloc data, allocate real blocks first so the
	 * reported block number refers to an actual on-disk location.
	 */
	if (!run_is_empty(&ni->file.run_da) &&
	    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		ni_allocate_da_blocks(ni);

	return iomap_bmap(mapping, block, &ntfs_iomap_ops);
}
580
ntfs_iomap_read_end_io(struct bio * bio)581 static void ntfs_iomap_read_end_io(struct bio *bio)
582 {
583 int error = blk_status_to_errno(bio->bi_status);
584 struct folio_iter fi;
585
586 bio_for_each_folio_all(fi, bio) {
587 struct folio *folio = fi.folio;
588 struct inode *inode = folio->mapping->host;
589 struct ntfs_inode *ni = ntfs_i(inode);
590 u64 valid = ni->i_valid;
591 u32 f_size = folio_size(folio);
592 loff_t f_pos = folio_pos(folio);
593
594
595 if (valid < f_pos + f_size) {
596 u32 z_from = valid <= f_pos ?
597 0 :
598 offset_in_folio(folio, valid);
599 /* The only thing ntfs_iomap_read_end_io used for. */
600 folio_zero_segment(folio, z_from, f_size);
601 }
602
603 iomap_finish_folio_read(folio, fi.offset, fi.length, error);
604 }
605 bio_put(bio);
606 }
607
608 /*
609 * Copied from iomap/bio.c.
610 */
static int ntfs_iomap_bio_read_folio_range(const struct iomap_iter *iter,
					   struct iomap_read_folio_ctx *ctx,
					   size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	size_t poff = offset_in_folio(folio, pos);
	loff_t length = iomap_length(iter);
	sector_t sector;
	struct bio *bio = ctx->read_ctx;

	/*
	 * Try to append this folio range to the bio accumulated in
	 * ctx->read_ctx; start a new bio when there is none, when the
	 * range is not contiguous with the bio's end sector, or when
	 * the bio is full.
	 */
	sector = iomap_sector(iomap, pos);
	if (!bio || bio_end_sector(bio) != sector ||
	    !bio_add_folio(bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		/* Submit the previous bio before building a new one. */
		if (bio)
			submit_bio(bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
				gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads. This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!bio)
			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
		if (ctx->rac)
			bio->bi_opf |= REQ_RAHEAD;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = ntfs_iomap_read_end_io;
		bio_add_folio_nofail(bio, folio, plen, poff);
		ctx->read_ctx = bio;
	}
	return 0;
}
653
ntfs_iomap_bio_submit_read(struct iomap_read_folio_ctx * ctx)654 static void ntfs_iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
655 {
656 struct bio *bio = ctx->read_ctx;
657
658 if (bio)
659 submit_bio(bio);
660 }
661
/* Read-path callbacks handed to iomap_read_folio()/iomap_readahead(). */
static const struct iomap_read_ops ntfs_iomap_bio_read_ops = {
	.read_folio_range = ntfs_iomap_bio_read_folio_range,
	.submit_read = ntfs_iomap_bio_submit_read,
};
666
ntfs_read_folio(struct file * file,struct folio * folio)667 static int ntfs_read_folio(struct file *file, struct folio *folio)
668 {
669 int err;
670 struct address_space *mapping = folio->mapping;
671 struct inode *inode = mapping->host;
672 struct ntfs_inode *ni = ntfs_i(inode);
673 loff_t vbo = folio_pos(folio);
674 struct iomap_read_folio_ctx ctx = {
675 .cur_folio = folio,
676 .ops = &ntfs_iomap_bio_read_ops,
677 };
678
679 if (unlikely(is_bad_ni(ni))) {
680 folio_unlock(folio);
681 return -EIO;
682 }
683
684 if (ni->i_valid <= vbo) {
685 folio_zero_range(folio, 0, folio_size(folio));
686 folio_mark_uptodate(folio);
687 folio_unlock(folio);
688 return 0;
689 }
690
691 if (is_compressed(ni)) {
692 /* ni_lock is taken inside ni_read_folio_cmpr after page locks */
693 err = ni_read_folio_cmpr(ni, folio);
694 return err;
695 }
696
697 iomap_read_folio(&ntfs_iomap_ops, &ctx, NULL);
698 return 0;
699 }
700
ntfs_readahead(struct readahead_control * rac)701 static void ntfs_readahead(struct readahead_control *rac)
702 {
703 struct address_space *mapping = rac->mapping;
704 struct inode *inode = mapping->host;
705 struct ntfs_inode *ni = ntfs_i(inode);
706 struct iomap_read_folio_ctx ctx = {
707 .ops = &ntfs_iomap_bio_read_ops,
708 .rac = rac,
709 };
710
711 if (is_resident(ni)) {
712 /* No readahead for resident. */
713 return;
714 }
715
716 if (is_compressed(ni)) {
717 /* No readahead for compressed. */
718 return;
719 }
720
721 iomap_readahead(&ntfs_iomap_ops, &ctx, NULL);
722 }
723
int ntfs_set_size(struct inode *inode, u64 new_size)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u64 limit;
	int err;

	/* Check for maximum file size on this volume. */
	limit = (is_sparsed(ni) || is_compressed(ni)) ? sbi->maxbytes_sparse :
							sbi->maxbytes;
	if (new_size > limit)
		return -EFBIG;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &ni->i_valid, true);
	if (!err) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;
}
756
/*
 * Special srcmap value passed to ntfs_iomap_begin() by
 * ntfs_writeback_range() to force real cluster allocation
 * (delalloc disabled).
 */
#define WB_NO_DA ((struct iomap *)1)
761 /*
762 * Function to get mapping vbo -> lbo.
763 * used with:
764 * - iomap_zero_range
765 * - iomap_truncate_page
766 * - iomap_dio_rw
767 * - iomap_file_buffered_write
768 * - iomap_bmap
769 * - iomap_fiemap
770 * - iomap_bio_read_folio
771 * - iomap_bio_readahead
772 */
static int ntfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
			    unsigned int flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn = offset >> cluster_bits;	/* First cluster of the range. */
	u32 off = offset & sbi->cluster_mask;	/* Byte offset inside it. */
	bool rw = flags & IOMAP_WRITE;
	loff_t endbyte = offset + length;
	void *res = NULL;	/* Copy of resident data, freed in iomap_end. */
	int err;
	CLST lcn, clen, clen_max = 1;
	bool new_clst = false;
	bool no_da;
	bool zero = false;

	if (unlikely(ntfs3_forced_shutdown(sbi->sb)))
		return -EIO;

	if (flags & IOMAP_REPORT) {
		if (offset > ntfs_get_maxbytes(ni)) {
			/* called from fiemap/bmap. */
			return -EINVAL;
		}

		if (offset >= inode->i_size) {
			/* special code for report. */
			return -ENOENT;
		}
	}

	if (IOMAP_ZERO == flags && (endbyte & sbi->cluster_mask)) {
		/* Partial-cluster zeroing must behave like a write. */
		rw = true;
	} else if (rw) {
		/* Map as many clusters as the request covers in one go. */
		clen_max = bytes_to_cluster(sbi, endbyte) - vcn;
	}

	/*
	 * Force to allocate clusters if directIO(write) or writeback_range.
	 * NOTE: attr_data_get_block allocates clusters only for sparse file.
	 * Normal file allocates clusters in attr_set_size.
	 */
	no_da = flags == (IOMAP_DIRECT | IOMAP_WRITE) || srcmap == WB_NO_DA;

	err = attr_data_get_block(ni, vcn, clen_max, &lcn, &clen,
				  rw ? &new_clst : NULL, zero, &res, no_da);
	if (err) {
		return err;
	}

	if (lcn == EOF_LCN) {
		/* request out of file. */
		if (flags & IOMAP_REPORT) {
			/* special code for report. */
			return -ENOENT;
		}

		if (rw) {
			/* should never be here. */
			return -EINVAL;
		}
		/* Reads past EOF are treated as a hole. */
		lcn = SPARSE_LCN;
	}

	iomap->flags = new_clst ? IOMAP_F_NEW : 0;

	if (lcn == RESIDENT_LCN) {
		/* Resident data: hand iomap an inline copy ('res'). */
		if (offset >= clen) {
			/* 'clen' holds the resident size in bytes here. */
			kfree(res);
			if (flags & IOMAP_REPORT) {
				/* special code for report. */
				return -ENOENT;
			}
			return -EFAULT;
		}

		iomap->private = iomap->inline_data = res;
		iomap->type = IOMAP_INLINE;
		iomap->offset = 0;
		iomap->length = clen; /* resident size in bytes. */
		return 0;
	}

	if (!clen) {
		/* broken file? */
		return -EINVAL;
	}

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;
	iomap->length = ((loff_t)clen << cluster_bits) - off;

	if (lcn == COMPRESSED_LCN) {
		/* should never be here. */
		return -EOPNOTSUPP;
	}

	if (lcn == DELALLOC_LCN) {
		/* Delayed allocation: no disk address yet. */
		iomap->type = IOMAP_DELALLOC;
		iomap->addr = IOMAP_NULL_ADDR;
	} else {
		/* Translate clusters into bytes. */
		iomap->addr = ((loff_t)lcn << cluster_bits) + off;
		if (length && iomap->length > length)
			iomap->length = length;
		else
			endbyte = offset + iomap->length;

		if (lcn == SPARSE_LCN) {
			iomap->addr = IOMAP_NULL_ADDR;
			iomap->type = IOMAP_HOLE;
			// if (IOMAP_ZERO == flags && !off) {
			//	iomap->length = (endbyte - offset) &
			//			sbi->cluster_mask_inv;
			// }
		} else if (endbyte <= ni->i_valid) {
			/* Entirely inside the valid size: plain mapped data. */
			iomap->type = IOMAP_MAPPED;
		} else if (offset < ni->i_valid) {
			/* Straddles the valid size; report only the valid part. */
			iomap->type = IOMAP_MAPPED;
			if (flags & IOMAP_REPORT)
				iomap->length = ni->i_valid - offset;
		} else if (rw || (flags & IOMAP_ZERO)) {
			iomap->type = IOMAP_MAPPED;
		} else {
			/* Allocated but beyond the valid size: reads see zeroes. */
			iomap->type = IOMAP_UNWRITTEN;
		}
	}

	if ((flags & IOMAP_ZERO) &&
	    (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
		/* Avoid too large requests: cap at the current page boundary. */
		u32 tail;
		u32 off_a = offset & (PAGE_SIZE - 1);

		if (off_a)
			tail = PAGE_SIZE - off_a;
		else
			tail = PAGE_SIZE;

		if (iomap->length > tail)
			iomap->length = tail;
	}

	return 0;
}
920
static int ntfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned int flags,
			  struct iomap *iomap)
{
	int err = 0;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t endbyte = pos + written;

	if ((flags & IOMAP_WRITE) || (flags & IOMAP_ZERO)) {
		if (iomap->type == IOMAP_INLINE) {
			/* Resident file: copy written bytes back into the MFT record. */
			u32 data_size;
			struct ATTRIB *attr;
			struct mft_inode *mi;

			attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0,
					    NULL, &mi);
			if (!attr || attr->non_res) {
				/* No resident ATTR_DATA any more: bail out. */
				err = -EINVAL;
				goto out;
			}

			data_size = le32_to_cpu(attr->res.data_size);
			if (!(pos < data_size && endbyte <= data_size)) {
				/* The written range must fit the resident data. */
				err = -EINVAL;
				goto out;
			}

			/* Update resident data. */
			memcpy(resident_data(attr) + pos,
			       iomap_inline_data(iomap, pos), written);
			mi->dirty = true;
			ni->i_valid = data_size;
		} else if (ni->i_valid < endbyte) {
			/* Extend the valid size over the newly written bytes. */
			ni->i_valid = endbyte;
			mark_inode_dirty(inode);
		}
	}

	if ((flags & IOMAP_ZERO) &&
	    (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
		/* Pair for code in ntfs_iomap_begin. */
		balance_dirty_pages_ratelimited(inode->i_mapping);
		cond_resched();
	}

out:
	if (iomap->type == IOMAP_INLINE) {
		/* Free the resident-data copy handed out by ntfs_iomap_begin. */
		kfree(iomap->private);
		iomap->private = NULL;
	}

	return err;
}
974
975 /*
976 * write_begin + put_folio + write_end.
977 * iomap_zero_range
978 * iomap_truncate_page
979 * iomap_file_buffered_write
980 */
static void ntfs_iomap_put_folio(struct inode *inode, loff_t pos,
				 unsigned int len, struct folio *folio)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = pos + len;
	loff_t f_end = folio_pos(folio) + folio_size(folio);

	/*
	 * If the write ends at/after the valid size but inside the folio,
	 * zero the remainder [end, f_end) — the only thing this
	 * put_folio hook is used for.
	 */
	if (end >= ni->i_valid && end < f_end)
		folio_zero_segment(folio, offset_in_folio(folio, end),
				   folio_size(folio));

	folio_unlock(folio);
	folio_put(folio);
}
998
999 /*
1000 * iomap_writeback_ops::writeback_range
1001 */
static ssize_t ntfs_writeback_range(struct iomap_writepage_ctx *wpc,
				    struct folio *folio, u64 offset,
				    unsigned int len, u64 end_pos)
{
	struct iomap *iomap = &wpc->iomap;

	/* Refresh the cached mapping if @offset falls outside it. */
	if (iomap->offset + iomap->length <= offset || offset < iomap->offset) {
		int err;
		struct inode *inode = wpc->inode;
		struct ntfs_inode *ni = ntfs_i(inode);
		struct ntfs_sb_info *sbi = ntfs_sb(inode->i_sb);
		loff_t i_size_up = ntfs_up_cluster(sbi, inode->i_size);
		loff_t len_max = i_size_up - offset;

		/* Convert outstanding delalloc extents into real clusters. */
		err = ni->file.run_da.count ? ni_allocate_da_blocks(ni) : 0;

		if (!err) {
			/* Use local special value 'WB_NO_DA' to disable delalloc. */
			err = ntfs_iomap_begin(inode, offset, len_max,
					       IOMAP_WRITE, iomap, WB_NO_DA);
		}

		if (err) {
			/* Mapping failed: the volume needs checking. */
			ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
			return err;
		}
	}

	return iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
}
1032
1033
/* Writeback callbacks handed to iomap_writepages(). */
static const struct iomap_writeback_ops ntfs_writeback_ops = {
	.writeback_range = ntfs_writeback_range,
	.writeback_submit = iomap_ioend_writeback_submit,
};
1038
ntfs_resident_writepage(struct folio * folio,struct writeback_control * wbc)1039 static int ntfs_resident_writepage(struct folio *folio,
1040 struct writeback_control *wbc)
1041 {
1042 struct address_space *mapping = folio->mapping;
1043 struct inode *inode = mapping->host;
1044 struct ntfs_inode *ni = ntfs_i(inode);
1045 int ret;
1046
1047 /* Avoid any operation if inode is bad. */
1048 if (unlikely(is_bad_ni(ni)))
1049 return -EINVAL;
1050
1051 if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
1052 return -EIO;
1053
1054 ni_lock(ni);
1055 ret = attr_data_write_resident(ni, folio);
1056 ni_unlock(ni);
1057
1058 if (ret != E_NTFS_NONRESIDENT)
1059 folio_unlock(folio);
1060 mapping_set_error(mapping, ret);
1061 return ret;
1062 }
1063
ntfs_writepages(struct address_space * mapping,struct writeback_control * wbc)1064 static int ntfs_writepages(struct address_space *mapping,
1065 struct writeback_control *wbc)
1066 {
1067 int err;
1068 struct inode *inode = mapping->host;
1069 struct ntfs_inode *ni = ntfs_i(inode);
1070 struct iomap_writepage_ctx wpc = {
1071 .inode = mapping->host,
1072 .wbc = wbc,
1073 .ops = &ntfs_writeback_ops,
1074 };
1075
1076 /* Avoid any operation if inode is bad. */
1077 if (unlikely(is_bad_ni(ni)))
1078 return -EINVAL;
1079
1080 if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
1081 return -EIO;
1082
1083 if (is_resident(ni)) {
1084 struct folio *folio = NULL;
1085
1086 while ((folio = writeback_iter(mapping, wbc, folio, &err)))
1087 err = ntfs_resident_writepage(folio, wbc);
1088
1089 return err;
1090 }
1091
1092 return iomap_writepages(&wpc);
1093 }
1094
ntfs3_write_inode(struct inode * inode,struct writeback_control * wbc)1095 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1096 {
1097 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1098 }
1099
int ntfs_sync_inode(struct inode *inode)
{
	/* Synchronous variant of ntfs3_write_inode(). */
	return _ni_write_inode(inode, 1);
}
1104
1105 /*
1106 * Helper function to read file.
1107 * Used to read $AttrDef and $UpCase
1108 */
int inode_read_data(struct inode *inode, void *data, size_t bytes)
{
	struct address_space *mapping = inode->i_mapping;
	u8 *out = data;
	pgoff_t idx = 0;

	/* Copy 'bytes' from the start of the file, one page at a time. */
	while (bytes) {
		size_t chunk = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
		struct page *page = read_mapping_page(mapping, idx, NULL);
		void *kaddr;

		if (IS_ERR(page))
			return PTR_ERR(page);

		kaddr = kmap_atomic(page);
		memcpy(out, kaddr, chunk);
		kunmap_atomic(kaddr);

		put_page(page);

		bytes -= chunk;
		/* Destination advances a full page, matching 'idx'. */
		out += PAGE_SIZE;
		idx++;
	}
	return 0;
}
1133
1134 /*
1135 * ntfs_reparse_bytes
1136 *
1137 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1138 * for unicode string of @uni_len length.
1139 */
static inline u32 ntfs_reparse_bytes(u32 uni_len, bool is_absolute)
{
	/* Header + unicode string + decorated unicode string. */
	u32 units = 2 * uni_len + (is_absolute ? 4 : 0);

	return offsetof(struct REPARSE_DATA_BUFFER,
			SymbolicLinkReparseBuffer.PathBuffer) +
	       units * sizeof(short);
}
1147
static struct REPARSE_DATA_BUFFER *
ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
			   u32 size, u16 *nsize)
{
	int i, err;
	struct REPARSE_DATA_BUFFER *rp;
	__le16 *rp_name;
	typeof(rp->SymbolicLinkReparseBuffer) *rs;
	bool is_absolute;

	/* "X:..." targets are treated as absolute Windows paths. */
	is_absolute = symname[0] && symname[1] == ':';

	/* Allocate for the worst-case UTF-16 expansion of 'symname'. */
	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS);
	if (!rp)
		return ERR_PTR(-ENOMEM);

	rs = &rp->SymbolicLinkReparseBuffer;
	rp_name = rs->PathBuffer;

	/*
	 * Convert link name to UTF-16.
	 * 'rp_name - 1' reuses the preceding u16 as the cpu_str length field.
	 */
	err = ntfs_nls_to_utf16(sbi, symname, size,
				(struct cpu_str *)(rp_name - 1), 2 * size,
				UTF16_LITTLE_ENDIAN);
	if (err < 0)
		goto out;

	/* err = the length of unicode name of symlink. */
	*nsize = ntfs_reparse_bytes(err, is_absolute);

	if (*nsize > sbi->reparse.max_size) {
		err = -EFBIG;
		goto out;
	}

	/* Translate Linux '/' into Windows '\'. */
	for (i = 0; i < err; i++) {
		if (rp_name[i] == cpu_to_le16('/'))
			rp_name[i] = cpu_to_le16('\\');
	}

	rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
	rp->ReparseDataLength =
		cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
					      SymbolicLinkReparseBuffer));

	/* PrintName + SubstituteName. */
	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
	rs->SubstituteNameLength =
		cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
	rs->PrintNameLength = rs->SubstituteNameOffset;

	/*
	 * TODO: Use relative path if possible to allow Windows to
	 * parse this path.
	 * 0-absolute path, 1- relative path (SYMLINK_FLAG_RELATIVE).
	 */
	rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);

	/* Duplicate the name as SubstituteName, leaving room for "\??\". */
	memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name,
		sizeof(short) * err);

	if (is_absolute) {
		/* Decorate SubstituteName with the "\??\" NT prefix. */
		rp_name += err;
		rp_name[0] = cpu_to_le16('\\');
		rp_name[1] = cpu_to_le16('?');
		rp_name[2] = cpu_to_le16('?');
		rp_name[3] = cpu_to_le16('\\');
	}

	return rp;
out:
	kfree(rp);
	return ERR_PTR(err);
}
1223
1224 /*
1225 * ntfs_create_inode
1226 *
1227 * Helper function for:
1228 * - ntfs_create
1229 * - ntfs_mknod
1230 * - ntfs_symlink
1231 * - ntfs_mkdir
1232 * - ntfs_atomic_open
1233 *
1234 * NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
1235 */
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
		      struct dentry *dentry, const struct cpu_str *uni,
		      umode_t mode, dev_t dev, const char *symname, u32 size,
		      struct ntfs_fnd *fnd)
{
	int err;
	struct super_block *sb = dir->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	const struct qstr *name = &dentry->d_name;
	CLST ino = 0;
	struct ntfs_inode *dir_ni = ntfs_i(dir);
	struct ntfs_inode *ni = NULL;
	struct inode *inode = NULL;
	struct ATTRIB *attr;
	struct ATTR_STD_INFO5 *std5;
	struct ATTR_FILE_NAME *fname;
	struct MFT_REC *rec;
	u32 asize, dsize, sd_size;
	enum FILE_ATTRIBUTE fa;
	__le32 security_id = SECURITY_ID_INVALID;
	CLST vcn;
	const void *sd;
	u16 t16, nsize = 0, aid = 0;	/* aid: next attribute id in the record. */
	struct INDEX_ROOT *root, *dir_root;
	struct NTFS_DE *e, *new_de = NULL;
	struct REPARSE_DATA_BUFFER *rp = NULL;
	bool rp_inserted = false;

	/* New file will be resident or non resident. */
	const bool new_file_resident = 1;

	/* NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is already locked. */
	if (!fnd)
		ni_lock_dir(dir_ni);

	dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
	if (!dir_root) {
		err = -EINVAL;
		goto out1;
	}

	/* Derive on-disk FILE_ATTRIBUTE flags from the requested mode. */
	if (S_ISDIR(mode)) {
		/* Use parent's directory attributes. */
		fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
		     FILE_ATTRIBUTE_ARCHIVE;
		/*
		 * By default child directory inherits parent attributes.
		 * Root directory is hidden + system.
		 * Make an exception for children in root.
		 */
		if (dir->i_ino == MFT_REC_ROOT)
			fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
	} else if (S_ISLNK(mode)) {
		/* It is a good idea for a link to be the same type (file/dir) as its target. */
		fa = FILE_ATTRIBUTE_REPARSE_POINT;

		/*
		 * Linux: there are dir/file/symlink and so on.
		 * NTFS: symlinks are "dir + reparse" or "file + reparse"
		 * It is good idea to create:
		 * dir + reparse if 'symname' points to directory
		 * or
		 * file + reparse if 'symname' points to file
		 * Unfortunately kern_path hangs if symname contains 'dir'.
		 */

		/*
		 * struct path path;
		 *
		 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
		 *	struct inode *target = d_inode(path.dentry);
		 *
		 *	if (S_ISDIR(target->i_mode))
		 *		fa |= FILE_ATTRIBUTE_DIRECTORY;
		 *	// if ( target->i_sb == sb ){
		 *	//	use relative path?
		 *	// }
		 *	path_put(&path);
		 * }
		 */
	} else if (S_ISREG(mode)) {
		if (sbi->options->sparse) {
			/* Sparse regular file, because of the 'sparse' mount option. */
			fa = FILE_ATTRIBUTE_SPARSE_FILE |
			     FILE_ATTRIBUTE_ARCHIVE;
		} else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
			/* Compressed regular file, if parent is compressed. */
			fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
		} else {
			/* Regular file, default attributes. */
			fa = FILE_ATTRIBUTE_ARCHIVE;
		}
	} else {
		/* Special file (char/block/fifo/socket). */
		fa = FILE_ATTRIBUTE_ARCHIVE;
	}

	/* If option "hide_dot_files" then set hidden attribute for dot files. */
	if (sbi->options->hide_dot_files && name->name[0] == '.')
		fa |= FILE_ATTRIBUTE_HIDDEN;

	/* No write permission anywhere => mark read-only on disk. */
	if (!(mode & 0222))
		fa |= FILE_ATTRIBUTE_READONLY;

	/* Allocate PATH_MAX bytes. */
	new_de = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!new_de) {
		err = -ENOMEM;
		goto out1;
	}

	/* Avoid any operation if inode is bad. */
	if (unlikely(is_bad_ni(dir_ni))) {
		err = -EINVAL;
		goto out2;
	}

	if (unlikely(ntfs3_forced_shutdown(sb))) {
		err = -EIO;
		goto out2;
	}

	/* Mark rw ntfs as dirty. it will be cleared at umount. */
	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	/* Step 1: allocate and fill new mft record. */
	err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
	if (err)
		goto out2;

	ni = ntfs_new_inode(sbi, ino, S_ISDIR(mode) ? RECORD_FLAG_DIR : 0);
	if (IS_ERR(ni)) {
		err = PTR_ERR(ni);
		ni = NULL;
		goto out3;
	}
	inode = &ni->vfs_inode;
	inode_init_owner(idmap, inode, dir, mode);
	/* inode_init_owner may have adjusted the mode (e.g. setgid). */
	mode = inode->i_mode;

	ni->i_crtime = current_time(inode);

	rec = ni->mi.mrec;
	rec->hard_links = cpu_to_le16(1);
	attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));

	/* Get default security id. */
	sd = s_default_security;
	sd_size = sizeof(s_default_security);

	if (is_ntfs3(sbi)) {
		security_id = dir_ni->std_security_id;
		if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
			security_id = sbi->security.def_security_id;

			if (security_id == SECURITY_ID_INVALID &&
			    !ntfs_insert_security(sbi, sd, sd_size,
						  &security_id, NULL))
				sbi->security.def_security_id = security_id;
		}
	}

	/* Insert standard info. */
	std5 = Add2Ptr(attr, SIZEOF_RESIDENT);

	/* With a valid security id we use the larger STD_INFO5 layout. */
	if (security_id == SECURITY_ID_INVALID) {
		dsize = sizeof(struct ATTR_STD_INFO);
	} else {
		dsize = sizeof(struct ATTR_STD_INFO5);
		std5->security_id = security_id;
		ni->std_security_id = security_id;
	}
	asize = SIZEOF_RESIDENT + dsize;

	attr->type = ATTR_STD;
	attr->size = cpu_to_le32(asize);
	attr->id = cpu_to_le16(aid++);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.data_size = cpu_to_le32(dsize);

	/* All four timestamps start at creation time. */
	std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
		kernel2nt(&ni->i_crtime);

	std5->fa = ni->std_fa = fa;

	attr = Add2Ptr(attr, asize);

	/* Insert file name. */
	err = fill_name_de(sbi, new_de, name, uni);
	if (err)
		goto out4;

	mi_get_ref(&ni->mi, &new_de->ref);

	fname = (struct ATTR_FILE_NAME *)(new_de + 1);

	if (sbi->options->windows_names &&
	    !valid_windows_name(sbi, (struct le_str *)&fname->name_len)) {
		err = -EINVAL;
		goto out4;
	}

	/* Duplicate std info into the file-name attribute. */
	mi_get_ref(&dir_ni->mi, &fname->home);
	fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
		fname->dup.a_time = std5->cr_time;
	fname->dup.alloc_size = fname->dup.data_size = 0;
	fname->dup.fa = std5->fa;
	fname->dup.extend_data = S_ISLNK(mode) ? IO_REPARSE_TAG_SYMLINK : 0;

	dsize = le16_to_cpu(new_de->key_size);
	asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);

	attr->type = ATTR_NAME;
	attr->size = cpu_to_le32(asize);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.flags = RESIDENT_FLAG_INDEXED;
	attr->id = cpu_to_le16(aid++);
	attr->res.data_size = cpu_to_le32(dsize);
	memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);

	attr = Add2Ptr(attr, asize);

	if (security_id == SECURITY_ID_INVALID) {
		/* Insert security attribute. */
		asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);

		attr->type = ATTR_SECURE;
		attr->size = cpu_to_le32(asize);
		attr->id = cpu_to_le16(aid++);
		attr->res.data_off = SIZEOF_RESIDENT_LE;
		attr->res.data_size = cpu_to_le32(sd_size);
		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);

		attr = Add2Ptr(attr, asize);
	}

	attr->id = cpu_to_le16(aid++);
	if (fa & FILE_ATTRIBUTE_DIRECTORY) {
		/*
		 * Regular directory or symlink to directory.
		 * Create root attribute.
		 */
		dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
		asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;

		attr->type = ATTR_ROOT;
		attr->size = cpu_to_le32(asize);

		attr->name_len = ARRAY_SIZE(I30_NAME);
		attr->name_off = SIZEOF_RESIDENT_LE;
		attr->res.data_off =
			cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
		attr->res.data_size = cpu_to_le32(dsize);
		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
		       sizeof(I30_NAME));

		/* Copy index header settings from the parent's root. */
		root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
		memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
		root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR));
		root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
					      sizeof(struct NTFS_DE));
		root->ihdr.total = root->ihdr.used;

		/* Empty index: a single "last" entry. */
		e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
		e->size = cpu_to_le16(sizeof(struct NTFS_DE));
		e->flags = NTFS_IE_LAST;
	} else if (S_ISLNK(mode)) {
		/*
		 * Symlink to file.
		 * Create empty resident data attribute.
		 */
		asize = SIZEOF_RESIDENT;

		/* Insert empty ATTR_DATA */
		attr->type = ATTR_DATA;
		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
		attr->name_off = SIZEOF_RESIDENT_LE;
		attr->res.data_off = SIZEOF_RESIDENT_LE;
	} else if (!new_file_resident && S_ISREG(mode)) {
		/*
		 * Regular file. Create empty non resident data attribute.
		 * NOTE: dead branch while new_file_resident == 1 above;
		 * kept for a possible future non-resident-by-default mode.
		 */
		attr->type = ATTR_DATA;
		attr->non_res = 1;
		attr->nres.evcn = cpu_to_le64(-1ll);
		if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
			attr->flags = ATTR_FLAG_SPARSED;
			asize = SIZEOF_NONRESIDENT_EX + 8;
		} else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
			attr->flags = ATTR_FLAG_COMPRESSED;
			attr->nres.c_unit = NTFS_LZNT_CUNIT;
			asize = SIZEOF_NONRESIDENT_EX + 8;
		} else {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
			attr->name_off = SIZEOF_NONRESIDENT_LE;
			asize = SIZEOF_NONRESIDENT + 8;
		}
		attr->nres.run_off = attr->name_off;
	} else {
		/*
		 * Node. Create empty resident data attribute.
		 */
		attr->type = ATTR_DATA;
		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
		attr->name_off = SIZEOF_RESIDENT_LE;
		if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
			attr->flags = ATTR_FLAG_SPARSED;
		else if (fa & FILE_ATTRIBUTE_COMPRESSED)
			attr->flags = ATTR_FLAG_COMPRESSED;
		attr->res.data_off = SIZEOF_RESIDENT_LE;
		asize = SIZEOF_RESIDENT;
		ni->ni_flags |= NI_FLAG_RESIDENT;
	}

	if (S_ISDIR(mode)) {
		ni->ni_flags |= NI_FLAG_DIR;
		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
		if (err)
			goto out4;
	} else if (S_ISLNK(mode)) {
		rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);

		if (IS_ERR(rp)) {
			err = PTR_ERR(rp);
			rp = NULL;
			goto out4;
		}

		/*
		 * Insert ATTR_REPARSE.
		 */
		attr = Add2Ptr(attr, asize);
		attr->type = ATTR_REPARSE;
		attr->id = cpu_to_le16(aid++);

		/* Resident or non resident? */
		asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
		t16 = PtrOffset(rec, attr);

		/*
		 * Below function 'ntfs_save_wsl_perm' requires 0x78 bytes.
		 * It is good idea to keep extended attributes resident.
		 */
		if (asize + t16 + 0x78 + 8 > sbi->record_size) {
			/* Too big to stay resident: spill to clusters. */
			CLST alen;
			CLST clst = bytes_to_cluster(sbi, nsize);

			/* Bytes per runs. */
			t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;

			attr->non_res = 1;
			attr->nres.evcn = cpu_to_le64(clst - 1);
			attr->name_off = SIZEOF_NONRESIDENT_LE;
			attr->nres.run_off = attr->name_off;
			attr->nres.data_size = cpu_to_le64(nsize);
			attr->nres.valid_size = attr->nres.data_size;
			attr->nres.alloc_size =
				cpu_to_le64(ntfs_up_cluster(sbi, nsize));

			err = attr_allocate_clusters(sbi, &ni->file.run, NULL,
						     0, 0, clst, NULL,
						     ALLOCATE_DEF, &alen, 0,
						     NULL, NULL);
			if (err)
				goto out5;

			err = run_pack(&ni->file.run, 0, clst,
				       Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
				       &vcn);
			if (err < 0)
				goto out5;

			if (vcn != clst) {
				err = -EINVAL;
				goto out5;
			}

			/* run_pack returned the packed run length in err. */
			asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
			/* Write non resident data. */
			err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp,
						nsize, 0);
			if (err)
				goto out5;
		} else {
			attr->res.data_off = SIZEOF_RESIDENT_LE;
			attr->res.data_size = cpu_to_le32(nsize);
			memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
		}
		/* Size of symlink equals the length of input string. */
		inode->i_size = size;

		attr->size = cpu_to_le32(asize);

		err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
					  &new_de->ref);
		if (err)
			goto out5;

		rp_inserted = true;
	}

	/* Terminate the attribute list. */
	attr = Add2Ptr(attr, asize);
	attr->type = ATTR_END;

	rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
	rec->next_attr_id = cpu_to_le16(aid);

	inode->i_generation = le16_to_cpu(rec->seq);

	/* Hook up VFS operations matching the file type. */
	if (S_ISDIR(mode)) {
		inode->i_op = &ntfs_dir_inode_operations;
		inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
				       &ntfs_legacy_dir_operations :
				       &ntfs_dir_operations;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &ntfs_link_inode_operations;
		inode->i_fop = NULL;
		inode->i_mapping->a_ops = &ntfs_aops;
		inode->i_size = size;
		inode_nohighmem(inode);
	} else if (S_ISREG(mode)) {
		inode->i_op = &ntfs_file_inode_operations;
		inode->i_fop = unlikely(is_legacy_ntfs(sb)) ?
				       &ntfs_legacy_file_operations :
				       &ntfs_file_operations;
		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
							      &ntfs_aops;
		init_rwsem(&ni->file.run_lock);
	} else {
		inode->i_op = &ntfs_special_inode_operations;
		init_special_inode(inode, mode, dev);
	}

#ifdef CONFIG_NTFS3_FS_POSIX_ACL
	if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
		err = ntfs_init_acl(idmap, inode, dir);
		if (err)
			goto out5;
	} else
#endif
	{
		inode->i_flags |= S_NOSEC;
	}

	if (!S_ISLNK(mode)) {
		/*
		 * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute.
		 * The packed size of extended attribute is stored in direntry too.
		 * 'fname' here points to inside new_de.
		 */
		err = ntfs_save_wsl_perm(inode, &fname->dup.extend_data);
		if (err)
			goto out6;

		/*
		 * update ea_size in file_name attribute too.
		 * Use ni_find_attr cause layout of MFT record may be changed
		 * in ntfs_init_acl and ntfs_save_wsl_perm.
		 */
		attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL,
				    NULL);
		if (attr) {
			struct ATTR_FILE_NAME *fn;

			fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
			if (fn)
				fn->dup.extend_data = fname->dup.extend_data;
		}
	}

	/* We do not need to update parent directory later */
	ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;

	/* Step 2: Add new name in index. */
	err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
	if (err)
		goto out6;

	/*
	 * Call 'd_instantiate' after inode->i_op is set
	 * but before finish_open.
	 */
	d_instantiate(dentry, inode);

	/* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
	inode_set_atime_to_ts(inode, ni->i_crtime);
	inode_set_ctime_to_ts(inode, ni->i_crtime);
	inode_set_mtime_to_ts(inode, ni->i_crtime);
	inode_set_mtime_to_ts(dir, ni->i_crtime);
	inode_set_ctime_to_ts(dir, ni->i_crtime);

	mark_inode_dirty(dir);
	mark_inode_dirty(inode);

	/* Normal exit. */
	goto out2;

out6:
	/* Roll back a non-resident ATTR_EA written by the helpers above. */
	attr = ni_find_attr(ni, NULL, NULL, ATTR_EA, NULL, 0, NULL, NULL);
	if (attr && attr->non_res) {
		/* Delete ATTR_EA, if non-resident. */
		struct runs_tree run;
		run_init(&run);
		attr_set_size(ni, ATTR_EA, NULL, 0, &run, 0, NULL, false);
		run_close(&run);
	}

	if (rp_inserted)
		ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);

out5:
	if (!S_ISDIR(mode))
		run_deallocate(sbi, &ni->file.run, false);

out4:
	clear_rec_inuse(rec);
	clear_nlink(inode);
	ni->mi.dirty = false;
	discard_new_inode(inode);
out3:
	ntfs_mark_rec_free(sbi, ino, false);

out2:
	kfree(new_de);
	kfree(rp);

out1:
	if (!fnd)
		ni_unlock(dir_ni);

	if (!err)
		unlock_new_inode(inode);

	return err;
}
1773
ntfs_link_inode(struct inode * inode,struct dentry * dentry)1774 int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1775 {
1776 int err;
1777 struct ntfs_inode *ni = ntfs_i(inode);
1778 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1779 struct NTFS_DE *de;
1780
1781 /* Allocate PATH_MAX bytes. */
1782 de = kzalloc(PATH_MAX, GFP_KERNEL);
1783 if (!de)
1784 return -ENOMEM;
1785
1786 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1787 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1788
1789 /* Construct 'de'. */
1790 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1791 if (err)
1792 goto out;
1793
1794 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1795 out:
1796 kfree(de);
1797 return err;
1798 }
1799
1800 /*
1801 * ntfs_unlink_inode
1802 *
1803 * inode_operations::unlink
1804 * inode_operations::rmdir
1805 */
int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
{
	int err;
	struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_inode *dir_ni = ntfs_i(dir);
	struct NTFS_DE *de, *de2 = NULL;
	int undo_remove;

	/* Refuse to unlink NTFS system (meta) files. */
	if (ntfs_is_meta_file(sbi, ni->mi.rno))
		return -EINVAL;

	/* Scratch buffer for the directory entry; PATH_MAX always suffices. */
	de = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!de)
		return -ENOMEM;

	ni_lock(ni);

	/* rmdir of a non-empty directory is not allowed. */
	if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
		err = -ENOTEMPTY;
		goto out;
	}

	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
	if (err < 0)
		goto out;

	undo_remove = 0;
	err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);

	if (!err) {
		/* Name removed: update link count and timestamps. */
		drop_nlink(inode);
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
		mark_inode_dirty(dir);
		inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
		if (inode->i_nlink)
			mark_inode_dirty(inode);
	} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
		/* Removal failed and could not be undone: record is corrupt. */
		_ntfs_bad_inode(inode);
	} else {
		/* Removal failed but was rolled back; flush whatever dirtied. */
		if (ni_is_dirty(dir))
			mark_inode_dirty(dir);
		if (ni_is_dirty(inode))
			mark_inode_dirty(inode);
	}

out:
	ni_unlock(ni);
	kfree(de);
	return err;
}
1858
ntfs_evict_inode(struct inode * inode)1859 void ntfs_evict_inode(struct inode *inode)
1860 {
1861 truncate_inode_pages_final(&inode->i_data);
1862
1863 invalidate_inode_buffers(inode);
1864 clear_inode(inode);
1865
1866 ni_clear(ntfs_i(inode));
1867 }
1868
1869 /*
1870 * ntfs_translate_junction
1871 *
1872 * Translate a Windows junction target to the Linux equivalent.
1873 * On junctions, targets are always absolute (they include the drive
1874 * letter). We have no way of knowing if the target is for the current
1875 * mounted device or not so we just assume it is.
1876 */
static int ntfs_translate_junction(const struct super_block *sb,
				   const struct dentry *link_de, char *target,
				   int target_len, int target_max)
{
	/* On success returns the new length of @target (rewritten in place). */
	int tl_len, err = target_len;
	char *link_path_buffer = NULL, *link_path;
	char *translated = NULL;
	char *target_start;
	int copy_len;

	link_path_buffer = kmalloc(PATH_MAX, GFP_NOFS);
	if (!link_path_buffer) {
		err = -ENOMEM;
		goto out;
	}
	/* Get link path, relative to mount point */
	link_path = dentry_path_raw(link_de, link_path_buffer, PATH_MAX);
	if (IS_ERR(link_path)) {
		ntfs_err(sb, "Error getting link path");
		err = -EINVAL;
		goto out;
	}

	translated = kmalloc(PATH_MAX, GFP_NOFS);
	if (!translated) {
		err = -ENOMEM;
		goto out;
	}

	/* Make translated path a relative path to mount point */
	strcpy(translated, "./");
	++link_path; /* Skip leading / */
	/* One "../" per path component of the link's own location. */
	for (tl_len = sizeof("./") - 1; *link_path; ++link_path) {
		if (*link_path == '/') {
			if (PATH_MAX - tl_len < sizeof("../")) {
				ntfs_err(sb,
					 "Link path %s has too many components",
					 link_path);
				err = -EINVAL;
				goto out;
			}
			strcpy(translated + tl_len, "../");
			tl_len += sizeof("../") - 1;
		}
	}

	/* Skip drive letter */
	target_start = target;
	while (*target_start && *target_start != ':')
		++target_start;

	if (!*target_start) {
		ntfs_err(sb, "Link target (%s) missing drive separator",
			 target);
		err = -EINVAL;
		goto out;
	}

	/* Skip drive separator and leading /, if exists */
	target_start += 1 + (target_start[1] == '/');
	copy_len = target_len - (target_start - target);

	/* '<=' keeps room for the terminating NUL. */
	if (PATH_MAX - tl_len <= copy_len) {
		ntfs_err(sb, "Link target %s too large for buffer (%d <= %d)",
			 target_start, PATH_MAX - tl_len, copy_len);
		err = -EINVAL;
		goto out;
	}

	/* translated path has a trailing / and target_start does not */
	strcpy(translated + tl_len, target_start);
	tl_len += copy_len;
	if (target_max <= tl_len) {
		ntfs_err(sb, "Target path %s too large for buffer (%d <= %d)",
			 translated, target_max, tl_len);
		err = -EINVAL;
		goto out;
	}
	strcpy(target, translated);
	err = tl_len;

out:
	kfree(link_path_buffer);
	kfree(translated);
	return err;
}
1963
/*
 * ntfs_readlink_hlp - Read the reparse point of @inode into @buffer as
 * a NUL-terminated Linux path.  Returns the string length or -errno.
 */
static noinline int ntfs_readlink_hlp(const struct dentry *link_de,
				      struct inode *inode, char *buffer,
				      int buflen)
{
	int i, err = -EINVAL;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 size;
	u16 ulen = 0;
	void *to_free = NULL;
	struct REPARSE_DATA_BUFFER *rp;
	const __le16 *uname;
	struct ATTRIB *attr;

	/* Reparse data present. Try to parse it. */
	static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
	static_assert(sizeof(u32) == sizeof(rp->ReparseTag));

	*buffer = 0;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
	if (!attr)
		goto out;

	if (!attr->non_res) {
		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
		if (!rp)
			goto out;
		size = le32_to_cpu(attr->res.data_size);
	} else {
		size = le64_to_cpu(attr->nres.data_size);
		rp = NULL;
	}

	/* Sanity: at least a tag must be present, and not absurdly large. */
	if (size > sbi->reparse.max_size || size <= sizeof(u32))
		goto out;

	if (!rp) {
		rp = kmalloc(size, GFP_NOFS);
		if (!rp) {
			err = -ENOMEM;
			goto out;
		}
		to_free = rp;
		/* Read into temporary buffer. */
		err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
		if (err)
			goto out;
	}

	/* Microsoft Tag. */
	switch (rp->ReparseTag) {
	case IO_REPARSE_TAG_MOUNT_POINT:
		/* Mount points and junctions. */
		/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
				     MountPointReparseBuffer.PathBuffer))
			goto out;
		uname = Add2Ptr(rp,
				offsetof(struct REPARSE_DATA_BUFFER,
					 MountPointReparseBuffer.PathBuffer) +
					le16_to_cpu(rp->MountPointReparseBuffer
							    .PrintNameOffset));
		ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
		break;

	case IO_REPARSE_TAG_SYMLINK:
		/* FolderSymbolicLink */
		/* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
				     SymbolicLinkReparseBuffer.PathBuffer))
			goto out;
		uname = Add2Ptr(
			rp, offsetof(struct REPARSE_DATA_BUFFER,
				     SymbolicLinkReparseBuffer.PathBuffer) +
				    le16_to_cpu(rp->SymbolicLinkReparseBuffer
							.PrintNameOffset));
		ulen = le16_to_cpu(
			rp->SymbolicLinkReparseBuffer.PrintNameLength);
		break;

	case IO_REPARSE_TAG_CLOUD:
	case IO_REPARSE_TAG_CLOUD_1:
	case IO_REPARSE_TAG_CLOUD_2:
	case IO_REPARSE_TAG_CLOUD_3:
	case IO_REPARSE_TAG_CLOUD_4:
	case IO_REPARSE_TAG_CLOUD_5:
	case IO_REPARSE_TAG_CLOUD_6:
	case IO_REPARSE_TAG_CLOUD_7:
	case IO_REPARSE_TAG_CLOUD_8:
	case IO_REPARSE_TAG_CLOUD_9:
	case IO_REPARSE_TAG_CLOUD_A:
	case IO_REPARSE_TAG_CLOUD_B:
	case IO_REPARSE_TAG_CLOUD_C:
	case IO_REPARSE_TAG_CLOUD_D:
	case IO_REPARSE_TAG_CLOUD_E:
	case IO_REPARSE_TAG_CLOUD_F:
		/* Cloud placeholder files: report a fixed name. */
		err = sizeof("OneDrive") - 1;
		if (err > buflen)
			err = buflen;
		memcpy(buffer, "OneDrive", err);
		goto out;

	default:
		if (IsReparseTagMicrosoft(rp->ReparseTag)) {
			/* Unknown Microsoft Tag. */
			goto out;
		}
		if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
		    size <= sizeof(struct REPARSE_POINT)) {
			goto out;
		}

		/* Users tag. */
		uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
		ulen = le16_to_cpu(rp->ReparseDataLength) -
		       sizeof(struct REPARSE_POINT);
	}

	/* Convert nlen from bytes to UNICODE chars. */
	ulen >>= 1;

	/* Check that name is available. */
	if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
		goto out;

	/* If name is already zero terminated then truncate it now. */
	if (!uname[ulen - 1])
		ulen -= 1;

	err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);

	if (err < 0)
		goto out;

	/* Translate Windows '\' into Linux '/'. */
	for (i = 0; i < err; i++) {
		if (buffer[i] == '\\')
			buffer[i] = '/';
	}

	/* Always set last zero. */
	buffer[err] = 0;

	/* If this is a junction, translate the link target. */
	if (rp->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
		err = ntfs_translate_junction(sb, link_de, buffer, err, buflen);

out:
	kfree(to_free);
	return err;
}
2117
ntfs_get_link(struct dentry * de,struct inode * inode,struct delayed_call * done)2118 static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
2119 struct delayed_call *done)
2120 {
2121 int err;
2122 char *ret;
2123
2124 if (!de)
2125 return ERR_PTR(-ECHILD);
2126
2127 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
2128 if (!ret)
2129 return ERR_PTR(-ENOMEM);
2130
2131 err = ntfs_readlink_hlp(de, inode, ret, PAGE_SIZE);
2132 if (err < 0) {
2133 kfree(ret);
2134 return ERR_PTR(err);
2135 }
2136
2137 set_delayed_call(done, kfree_link, ret);
2138
2139 return ret;
2140 }
2141
// clang-format off
/* Inode operations for symlinks (NTFS reparse points). */
const struct inode_operations ntfs_link_inode_operations = {
	.get_link	= ntfs_get_link,
	.setattr	= ntfs_setattr,
	.listxattr	= ntfs_listxattr,
};

/* Default address-space operations (uncompressed files). */
const struct address_space_operations ntfs_aops = {
	.read_folio	= ntfs_read_folio,
	.readahead	= ntfs_readahead,
	.writepages	= ntfs_writepages,
	.bmap		= ntfs_bmap,
	.dirty_folio	= iomap_dirty_folio,
	.migrate_folio	= filemap_migrate_folio,
	.release_folio	= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
};

/* Address-space operations for compressed files (read-side only here). */
const struct address_space_operations ntfs_aops_cmpr = {
	.read_folio	= ntfs_read_folio,
	.dirty_folio	= iomap_dirty_folio,
	.release_folio	= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
};

const struct iomap_ops ntfs_iomap_ops = {
	.iomap_begin	= ntfs_iomap_begin,
	.iomap_end	= ntfs_iomap_end,
};

const struct iomap_write_ops ntfs_iomap_folio_ops = {
	.put_folio	= ntfs_iomap_put_folio,
};
// clang-format on
2176